Dataset schema:

code: string (length 81 to 54k)
code_codestyle: int64 (0 to 721)
style_context: string (length 91 to 41.9k)
style_context_codestyle: int64 (0 to 699)
label: int64 (0 to 1)
code:

'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SCREAMING_SNAKE_CASE__( __UpperCamelCase ):
    lowercase__ = 42
    lowercase__ = 42


class SCREAMING_SNAKE_CASE__( __UpperCamelCase , __UpperCamelCase ):
    lowercase__ = 1

    @register_to_config
    def __init__( self , __UpperCamelCase = 2000 , __UpperCamelCase = 0.1_5 , __UpperCamelCase = 0.0_1 , __UpperCamelCase = 1348.0 , __UpperCamelCase = 1E-5 , __UpperCamelCase = 1 , ):
        '''simple docstring'''
        __a : Any = sigma_max

        # setable values
        __a : List[str] = None
        self.set_sigmas(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
        '''simple docstring'''
        return sample

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None ):
        '''simple docstring'''
        __a : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        __a : List[Any] = torch.linspace(1 , __UpperCamelCase , __UpperCamelCase , device=__UpperCamelCase )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None ):
        '''simple docstring'''
        __a : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
        __a : Dict = sigma_max if sigma_max is not None else self.config.sigma_max
        __a : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__UpperCamelCase , __UpperCamelCase )
        __a : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        __a : Union[str, Any] = torch.exp(torch.linspace(math.log(__UpperCamelCase ) , math.log(__UpperCamelCase ) , __UpperCamelCase ) )
        __a : Dict = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
        '''simple docstring'''
        return torch.where(
            timesteps == 0 ,
            torch.zeros_like(t.to(timesteps.device ) ) ,
            self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,
        )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )

        __a : Union[str, Any] = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        __a : Tuple = (timestep * (len(self.timesteps ) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        __a : Tuple = timesteps.to(self.discrete_sigmas.device )

        __a : Dict = self.discrete_sigmas[timesteps].to(sample.device )
        __a : Dict = self.get_adjacent_sigma(__UpperCamelCase , __UpperCamelCase ).to(sample.device )
        __a : Optional[int] = torch.zeros_like(__UpperCamelCase )
        __a : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        __a : Dict = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            __a : Tuple = diffusion.unsqueeze(-1 )
        __a : Any = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        __a : Any = randn_tensor(
            sample.shape , layout=sample.layout , generator=__UpperCamelCase , device=sample.device , dtype=sample.dtype )
        __a : Union[str, Any] = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        __a : int = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=__UpperCamelCase , prev_sample_mean=__UpperCamelCase )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        __a : Any = randn_tensor(sample.shape , layout=sample.layout , generator=__UpperCamelCase ).to(sample.device )

        # compute step size from the model_output, the noise, and the snr
        __a : Any = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        __a : Optional[int] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        __a : Any = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        __a : List[Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )  # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        __a : int = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            __a : Union[str, Any] = step_size.unsqueeze(-1 )
        __a : Any = sample + step_size * model_output
        __a : List[Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=__UpperCamelCase )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
        '''simple docstring'''
        __a : Dict = timesteps.to(original_samples.device )
        __a : str = self.discrete_sigmas.to(original_samples.device )[timesteps]
        __a : Tuple = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__UpperCamelCase ) * sigmas[:, None, None, None]
        )
        __a : Any = noise + original_samples
        return noisy_samples

    def __len__( self ):
        '''simple docstring'''
        return self.config.num_train_timesteps
code_codestyle: 715
style_context:

'''simple docstring'''
import qiskit


def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts:
    __a : Any = qiskit.Aer.get_backend("""aer_simulator""" )

    # Create a Quantum Circuit acting on the q register
    __a : str = qiskit.QuantumCircuit(lowercase , lowercase )

    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )

    # Execute the circuit on the simulator
    __a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 )

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(lowercase )


if __name__ == "__main__":
    print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
style_context_codestyle: 697
label: 0
code:

'''simple docstring'''
import pytest


__SCREAMING_SNAKE_CASE : Any = '__dummy_dataset1__'

__SCREAMING_SNAKE_CASE : List[Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def _snake_case ( ) -> Tuple:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def _snake_case ( ) -> Optional[Any]:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def _snake_case ( lowercase , lowercase , lowercase ) -> List[Any]:
    __a : Dict = dataset_loading_script_name
    __a : Any = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=lowercase )
    __a : Union[str, Any] = script_dir / F"""{script_name}.py"""
    with open(lowercase , """w""" ) as f:
        f.write(lowercase )
    return str(lowercase )
code_codestyle: 716
style_context:

'''simple docstring'''
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConformerConfig,
    WavaVecaConformerForCTC,
    WavaVecaConformerForPreTraining,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)

__SCREAMING_SNAKE_CASE : Any = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
    'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
    'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
    'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
    'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
    'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
    'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
    'self_attn.rotary_emb': 'encoder.embed_positions',
    'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
    'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
    'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
    'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
    'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
    'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
    'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
    'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
    'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
    'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
    'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
    'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]


def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
    for attribute in key.split(""".""" ):
        __a : str = getattr(lowercase , lowercase )

    if weight_type is not None:
        __a : Dict = getattr(lowercase , lowercase ).shape
    else:
        __a : Dict = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )

    if weight_type == "weight":
        __a : Any = value
    elif weight_type == "weight_g":
        __a : int = value
    elif weight_type == "weight_v":
        __a : int = value
    elif weight_type == "bias":
        __a : List[Any] = value
    elif weight_type == "running_mean":
        __a : Union[str, Any] = value
    elif weight_type == "running_var":
        __a : Tuple = value
    elif weight_type == "num_batches_tracked":
        __a : Optional[int] = value
    elif weight_type == "inv_freq":
        __a : List[str] = value
    else:
        __a : List[str] = value

    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def _snake_case ( lowercase , lowercase , lowercase ) -> Dict:
    __a : Dict = []
    __a : Dict = fairseq_model.state_dict()

    __a : Tuple = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        __a : int = False
        if "conv_layers" in name:
            load_conv_layer(
                lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
            __a : List[Any] = True
        else:
            for key, mapped_key in MAPPING.items():
                __a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    __a : str = True
                    if "*" in mapped_key:
                        __a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2]
                        __a : List[Any] = mapped_key.replace("""*""" , lowercase )
                    if "pos_bias_u" in name:
                        __a : Union[str, Any] = None
                    elif "pos_bias_v" in name:
                        __a : List[Any] = None
                    elif "weight_g" in name:
                        __a : List[Any] = """weight_g"""
                    elif "weight_v" in name:
                        __a : List[Any] = """weight_v"""
                    elif "bias" in name:
                        __a : Optional[int] = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __a : str = """weight"""
                    elif "running_mean" in name:
                        __a : List[str] = """running_mean"""
                    elif "inv_freq" in name:
                        __a : Dict = """inv_freq"""
                    elif "running_var" in name:
                        __a : Union[str, Any] = """running_var"""
                    elif "num_batches_tracked" in name:
                        __a : int = """num_batches_tracked"""
                    else:
                        __a : Optional[int] = None
                    set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
                continue
        if not is_used:
            unused_weights.append(lowercase )

    logger.warning(F"""Unused weights: {unused_weights}""" )


def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
    __a : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
    __a : Union[str, Any] = name.split(""".""" )
    __a : Optional[Any] = int(items[0] )
    __a : int = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            __a : Dict = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            __a : str = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            __a : Dict = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            __a : Union[str, Any] = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowercase )


@torch.no_grad()
def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]:
    if config_path is not None:
        __a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" )
    else:
        __a : Optional[int] = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        __a : Optional[Any] = """rotary"""

    if is_finetuned:
        if dict_path:
            __a : List[Any] = Dictionary.load(lowercase )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __a : int = target_dict.pad_index
            __a : List[str] = target_dict.bos_index
            __a : str = target_dict.eos_index
            __a : Dict = len(target_dict.symbols )
            __a : Any = os.path.join(lowercase , """vocab.json""" )
            if not os.path.isdir(lowercase ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
                return
            os.makedirs(lowercase , exist_ok=lowercase )
            __a : Dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            __a : Optional[Any] = 0
            __a : List[Any] = 1
            with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(lowercase , lowercase )
            __a : int = WavaVecaCTCTokenizer(
                lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
            __a : Optional[int] = True if config.feat_extract_norm == """layer""" else False
            __a : Dict = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
            __a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
            processor.save_pretrained(lowercase )

        __a : List[str] = WavaVecaConformerForCTC(lowercase )
    else:
        __a : Optional[int] = WavaVecaConformerForPreTraining(lowercase )

    if is_finetuned:
        __a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        __a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
        __a : Tuple = fairseq.tasks.setup_task(lowercase )
        __a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )

    __a : Any = model[0].eval()

    recursively_load_weights(lowercase , lowercase , not is_finetuned )

    hf_wavavec.save_pretrained(lowercase )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    __SCREAMING_SNAKE_CASE : int = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
style_context_codestyle: 697
label: 0
code:

'''simple docstring'''
import math


def _snake_case ( lowercase ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(lowercase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _snake_case ( lowercase = 1_0_0_0_1 ) -> int:
    try:
        __a : Union[str, Any] = int(lowercase )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    __a : list[int] = []
    __a : List[str] = 2
    while len(lowercase ) < nth:
        if is_prime(lowercase ):
            primes.append(lowercase )
            num += 1
        else:
            num += 1
    return primes[len(lowercase ) - 1]


if __name__ == "__main__":
    print(f'''{solution() = }''')
code_codestyle: 717
style_context:

'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable


def _snake_case ( lowercase ) -> Callable:
    @wraps(lowercase )
    def _inner_fn(*lowercase , **lowercase ):
        warnings.warn(
            (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") ,
            lowercase ,
        )
        return fn(*lowercase , **lowercase )

    return _inner_fn
style_context_codestyle: 697
label: 0
code:

'''simple docstring'''
import argparse

import torch

from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def _snake_case ( lowercase , lowercase , lowercase ) -> Any:
    # Construct model
    if gpta_config_file == "":
        __a : Dict = GPTaConfig()
    else:
        __a : Optional[Any] = GPTaConfig.from_json_file(lowercase )
    __a : Union[str, Any] = GPTaModel(lowercase )

    # Load weights from numpy
    load_tf_weights_in_gpta(lowercase , lowercase , lowercase )

    # Save pytorch-model
    __a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    __a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , lowercase )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    __SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
code_codestyle: 718
style_context:

'''simple docstring'''
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__( __UpperCamelCase ):
    lowercase__ = ["input_features", "attention_mask"]

    def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ):
        '''simple docstring'''
        super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
        __a : List[str] = feature_size
        __a : List[str] = sampling_rate
        __a : int = padding_value
        __a : Any = hop_length
        __a : int = win_length
        __a : Tuple = frame_signal_scale
        __a : Union[str, Any] = preemphasis_coeff
        __a : List[str] = mel_floor
        __a : Union[str, Any] = normalize_means
        __a : Optional[Any] = normalize_vars
        __a : Optional[Any] = win_function
        __a : Union[str, Any] = return_attention_mask

        __a : List[Any] = win_length * sampling_rate // 1000
        __a : List[Any] = hop_length * sampling_rate // 1000
        __a : Optional[Any] = optimal_fft_length(self.sample_size )
        __a : Any = (self.n_fft // 2) + 1

    def __lowerCamelCase ( self , __UpperCamelCase ):
        '''simple docstring'''
        if self.win_function == "hamming_window":
            __a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase )
        else:
            __a : Dict = window_function(window_length=self.sample_size , name=self.win_function )

        __a : Optional[Any] = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )

        __a : Any = spectrogram(
            one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , )
        return msfc_features.T

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
        '''simple docstring'''
        if self.normalize_means:
            __a : int = x[:input_length].mean(axis=0 )
            __a : str = np.subtract(__UpperCamelCase , __UpperCamelCase )
        if self.normalize_vars:
            __a : Dict = x[:input_length].std(axis=0 )
            __a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase )
        if input_length < x.shape[0]:
            __a : Union[str, Any] = padding_value

        # make sure array is in float32
        __a : Any = x.astype(np.floataa )
        return x

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ):
        '''simple docstring'''
        __a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )]

    def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        __a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        __a : Tuple = is_batched_numpy or (
            isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            __a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
            __a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa )
        elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __a : str = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            __a : Any = [raw_speech]

        # extract fbank features
        __a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech]

        # convert into correct format for padding
        __a : Optional[Any] = BatchFeature({"""input_features""": features} )

        __a : Any = self.pad(
            __UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
        # make sure list is in array format
        __a : int = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] , __UpperCamelCase ):
            __a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features]

        __a : List[str] = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            __a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            __a : Optional[Any] = (
                np.array(__UpperCamelCase , dtype=np.intaa )
                if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            __a : int = self.normalize(
                padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase )

        if return_tensors is not None:
            __a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )

        return padded_inputs
style_context_codestyle: 697
label: 0
code:

'''simple docstring'''
from typing import Dict, Optional

import numpy as np

import datasets


__SCREAMING_SNAKE_CASE : List[str] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'

__SCREAMING_SNAKE_CASE : Tuple = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'

__SCREAMING_SNAKE_CASE : int = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'


def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , ) -> Any:
    if label_map is not None:
        for old_id, new_id in label_map.items():
            __a : List[str] = new_id

    # turn into Numpy arrays
    __a : Any = np.array(lowercase )
    __a : str = np.array(lowercase )

    if reduce_labels:
        __a : Tuple = 2_5_5
        __a : Optional[int] = label - 1
        __a : Union[str, Any] = 2_5_5

    __a : Union[str, Any] = label != ignore_index
    __a : Any = np.not_equal(lowercase , lowercase )
    __a : Optional[Any] = pred_label[mask]
    __a : Optional[int] = np.array(lowercase )[mask]

    __a : Any = pred_label[pred_label == label]

    __a : Optional[Any] = np.histogram(lowercase , bins=lowercase , range=(0, num_labels - 1) )[0]
    __a : Tuple = np.histogram(lowercase , bins=lowercase , range=(0, num_labels - 1) )[0]
    __a : str = np.histogram(lowercase , bins=lowercase , range=(0, num_labels - 1) )[0]

    __a : str = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , ) -> Tuple:
    __a : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
    __a : Optional[Any] = np.zeros((num_labels,) , dtype=np.floataa )
    __a : int = np.zeros((num_labels,) , dtype=np.floataa )
    __a : str = np.zeros((num_labels,) , dtype=np.floataa )
    for result, gt_seg_map in zip(lowercase , lowercase ):
        __a : str = intersect_and_union(
            lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = False , ) -> str:
    __a : Dict = total_intersect_and_union(
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )

    # compute metrics
    __a : Tuple = {}

    __a : str = total_area_intersect.sum() / total_area_label.sum()
    __a : str = total_area_intersect / total_area_union
    __a : Optional[Any] = total_area_intersect / total_area_label

    __a : List[str] = np.nanmean(lowercase )
    __a : str = np.nanmean(lowercase )

    __a : Optional[Any] = all_acc
    __a : List[str] = iou
    __a : Any = acc

    if nan_to_num is not None:
        __a : List[Any] = {metric: np.nan_to_num(lowercase , nan=lowercase ) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__( datasets.Metric ):
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
                    """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
                } ) ,
            reference_urls=[
                """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
            ] , )

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ):
        '''simple docstring'''
        __a : Any = mean_iou(
            results=__UpperCamelCase , gt_seg_maps=__UpperCamelCase , num_labels=__UpperCamelCase , ignore_index=__UpperCamelCase , nan_to_num=__UpperCamelCase , label_map=__UpperCamelCase , reduce_labels=__UpperCamelCase , )
        return iou_result
code_codestyle: 719
style_context:

'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665


def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
    if fluid_density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if volume < 0:
        raise ValueError("""Impossible Object volume""" )
    if gravity <= 0:
        raise ValueError("""Impossible Gravity""" )

    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
style_context_codestyle: 697
label: 0
code:

import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


__SCREAMING_SNAKE_CASE : str = 1E-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class SCREAMING_SNAKE_CASE__:
    def __init__( self , __UpperCamelCase , __UpperCamelCase=16 , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=14 , __UpperCamelCase=10 , __UpperCamelCase=19 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=True , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=[1, 2, 3, 4, 5] , __UpperCamelCase=25 , __UpperCamelCase=5 , ):
        '''simple docstring'''
        __a : Union[str, Any] = d_model
        __a : int = parent
        __a : Tuple = batch_size
        __a : Tuple = prediction_length
        __a : Optional[Any] = context_length
        __a : str = cardinality
        __a : int = num_time_features
        __a : Any = lags_sequence
        __a : List[Any] = embedding_dimension
        __a : str = is_training
        __a : Optional[Any] = hidden_size
        __a : List[Any] = num_hidden_layers
        __a : Dict = num_attention_heads
        __a : int = intermediate_size
        __a : Any = hidden_act
        __a : str = hidden_dropout_prob
        __a : Union[str, Any] = attention_probs_dropout_prob
        __a : Dict = context_length
        __a : Union[str, Any] = prediction_length + label_length
        __a : Union[str, Any] = label_length
        __a : Tuple = moving_average
        __a : Tuple = autocorrelation_factor

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def __lowerCamelCase ( self , __UpperCamelCase ):
        '''simple docstring'''
        __a : List[Any] = config.context_length + max(config.lags_sequence )

        __a : int = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        __a : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        __a : List[Any] = floats_tensor([self.batch_size, _past_length] )
        __a : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5

        # decoder inputs
        __a : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        __a : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length] )

        __a : Any = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Tuple = self.get_config()
        __a : Tuple = self.prepare_autoformer_inputs_dict(__UpperCamelCase )
        return config, inputs_dict

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ):
        '''simple docstring'''
        __a : Optional[Any] = AutoformerModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
        __a : Union[str, Any] = model(**__UpperCamelCase )
        __a : int = outputs.encoder_last_hidden_state
        __a : Optional[int] = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : List[str] = model.get_encoder()
            encoder.save_pretrained(__UpperCamelCase )
            __a : Dict = AutoformerEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )

        __a : Optional[int] = model.create_network_inputs(**__UpperCamelCase )
        __a : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )

        __a : Optional[int] = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        __a : Optional[int] = encoder(inputs_embeds=__UpperCamelCase )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )

        __a : Optional[int] = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        __a : Optional[Any] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )

        __a : Tuple = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        __a : Dict = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )

        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : List[Any] = model.get_decoder()
            decoder.save_pretrained(__UpperCamelCase )
            __a : List[str] = AutoformerDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )

        __a : Dict = decoder(
            trend=__UpperCamelCase , inputs_embeds=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )


@require_torch
class SCREAMING_SNAKE_CASE__( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    lowercase__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    lowercase__ = (AutoformerForPrediction,) if is_torch_available() else ()
    lowercase__ = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : List[Any] = AutoformerModelTester(self )
        __a : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __a : Any = model_class(__UpperCamelCase )

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__UpperCamelCase )
                __a : str = model_class.from_pretrained(__UpperCamelCase , output_loading_info=__UpperCamelCase )
            self.assertEqual(info["""missing_keys"""] , [] )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase )

    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        pass

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Union[str, Any] = inspect.signature(getattr(__UpperCamelCase , """forward""" ) )
        # The main input is the name of the argument after `self`
        __a : Any = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , __UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __a : Tuple = model_class(__UpperCamelCase )
            __a : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Union[str, Any] = [*signature.parameters.keys()]

            __a : Dict = [
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""" )

            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ] )

            self.assertListEqual(arg_names[: len(__UpperCamelCase )] , __UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __a : Any = True
        __a : List[str] = getattr(self.model_tester , """seq_length""" , __UpperCamelCase )
        __a : Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , __UpperCamelCase )
        __a : Tuple = getattr(self.model_tester , """encoder_seq_length""" , __UpperCamelCase )
        __a : Dict = getattr(self.model_tester , """d_model""" , __UpperCamelCase )
        __a : List[str] = getattr(self.model_tester , """num_attention_heads""" , __UpperCamelCase )
        __a : Any = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            __a : List[str] = True
            __a : str = False
            __a : Tuple = True
            __a : Dict = model_class(__UpperCamelCase )
            model.to(__UpperCamelCase )
            model.eval()
            with torch.no_grad():
                __a : int = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
            __a : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a : Optional[int] = True
            __a : Union[str, Any] = model_class(__UpperCamelCase )
            model.to(__UpperCamelCase )
            model.eval()
            with torch.no_grad():
                __a : List[str] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
            __a : Any = outputs.encoder_attentions
            self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            __a : List[Any] = len(__UpperCamelCase )

            __a : str = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(__UpperCamelCase , __UpperCamelCase )

            # decoder attentions
            __a : int = outputs.decoder_attentions
            self.assertIsInstance(__UpperCamelCase , (list, tuple) )
            self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )

            # cross attentions
            __a : Union[str, Any] = outputs.cross_attentions
            self.assertIsInstance(__UpperCamelCase , (list, tuple) )
            self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )

            # Check attention is always last and order is fine
            __a : Union[str, Any] = True
            __a : List[str] = True
            __a : Union[str, Any] = model_class(__UpperCamelCase )
            model.to(__UpperCamelCase )
            model.eval()
            with torch.no_grad():
                __a : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )

            self.assertEqual(out_len + 2 , len(__UpperCamelCase ) )

            __a : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        super().test_retain_grad_hidden_states_attentions()


def _snake_case ( lowercase="train-batch.pt" ) -> List[str]:
    __a : Union[str, Any] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowercase , repo_type="""dataset""" )
    __a : str = torch.load(lowercase , map_location=lowercase )
    return batch


@require_torch
@slow
class SCREAMING_SNAKE_CASE__( unittest.TestCase ):
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Any = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
        __a : Dict = prepare_batch()

        with torch.no_grad():
            __a : List[str] = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]

        __a : Dict = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , __UpperCamelCase )

        __a : List[Any] = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__UpperCamelCase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
        __a : List[str] = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            __a : int = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        __a : Any = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , __UpperCamelCase )

        __a : Optional[Any] = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__UpperCamelCase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
        __a : Dict = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            __a : int = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        __a : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , __UpperCamelCase )

        __a : Union[str, Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__UpperCamelCase )
        __a : Union[str, Any] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __UpperCamelCase , rtol=1E-1 ) )
code_codestyle: 720
style_context:

'''simple docstring'''
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor


class SCREAMING_SNAKE_CASE__( unittest.TestCase ):
    def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ):
        '''simple docstring'''
        __a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        __a : Dict = parent
        __a : Union[str, Any] = batch_size
        __a : Optional[int] = num_channels
        __a : Dict = min_resolution
        __a : List[Any] = max_resolution
        __a : int = do_resize
        __a : str = size
        __a : Optional[Any] = do_rescale
        __a : Optional[Any] = rescale_factor
        __a : str = do_normalize
        __a : Any = image_mean
        __a : Optional[Any] = image_std
        __a : Dict = do_pad

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ):
        '''simple docstring'''
        if not batched:
            __a : Union[str, Any] = image_inputs[0]
            if isinstance(__UpperCamelCase , Image.Image ):
                __a , __a : Tuple = image.size
            else:
                __a , __a : Tuple = image.shape[1], image.shape[2]
            if w < h:
                __a : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
                __a : Tuple = self.size["""shortest_edge"""]
            elif w > h:
                __a : Optional[Any] = self.size["""shortest_edge"""]
                __a : Any = int(self.size["""shortest_edge"""] * w / h )
            else:
                __a : Any = self.size["""shortest_edge"""]
                __a : Optional[int] = self.size["""shortest_edge"""]
        else:
            __a : Any = []
            for image in image_inputs:
                __a , __a : Any = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
            __a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__( __UpperCamelCase , unittest.TestCase ):
    lowercase__ = DetrImageProcessor if is_vision_available() else None

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : str = DetrImageProcessingTester(self )

    @property
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
        self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
        self.assertEqual(image_processor.do_pad , __UpperCamelCase )

        __a : List[Any] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
        self.assertEqual(image_processor.do_pad , __UpperCamelCase )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        pass

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , Image.Image )

        # Test not batched input
        __a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values

        __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        __a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )

        __a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , np.ndarray )

        # Test not batched input
        __a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values

        __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values

        __a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )

        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , torch.Tensor )

        # Test not batched input
        __a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values

        __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values

        __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )

        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
            __a : Dict = json.loads(f.read() )

        __a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}

        # encode them
        __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
        __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )

        # verify pixel values
        __a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )

        __a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )

        # verify area
        __a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
        # verify boxes
        __a : Optional[int] = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
        __a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
        # verify image_id
        __a : Union[str, Any] = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
        # verify is_crowd
        __a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
        # verify class_labels
        __a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
        # verify orig_size
        __a : Any = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
        # verify size
        __a : str = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )

    @slow
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
            __a : Tuple = json.loads(f.read() )

        __a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
        __a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )

        # encode them
        __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
        __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )

        # verify pixel values
        __a : List[str] = torch.Size([1, 3, 800, 1066]
) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify masks __a : Union[str, Any] = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase ) # verify orig_size __a : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
697
0
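The expected-size helper in the DETR image-processor test above reduces to a shortest-edge rule: scale the image so its shorter side lands exactly on size["shortest_edge"], preserving the aspect ratio. A minimal standalone sketch of that rule (expected_resize is a hypothetical name; the longest-edge cap of 1333 is omitted because the tester above also omits it):

def expected_resize(height: int, width: int, shortest_edge: int = 18):
    # Scale so the shorter side becomes exactly `shortest_edge`, keeping the
    # aspect ratio; the other side is truncated to int, as in the tester.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge  # square input

assert expected_resize(400, 200) == (36, 18)  # width is the short side
assert expected_resize(200, 400) == (18, 36)  # height is the short side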
'''simple docstring'''


def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f'''{solution() = }''')
721
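The brute-force record above appends every Fibonacci number up to the limit. Because only every third Fibonacci number is even, the even terms alone satisfy E(k) = 4 * E(k-1) + E(k-2) (2, 8, 34, 144, ...), which gives an equivalent sketch that never generates the odd terms (solution_even_only is an illustrative name; both variants return 4613732 for the default limit of 4,000,000):

def solution_even_only(limit: int = 4_000_000) -> int:
    # 2 and 8 seed the even-only recurrence E(k) = 4 * E(k-1) + E(k-2)
    prev, curr = 2, 8
    total = 2
    while curr <= limit:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total

assert solution_even_only(100) == 2 + 8 + 34  # even Fibonacci numbers <= 100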
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING) __SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() if args.tokenizer_name: __SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size __SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine' if args.fpaa: __SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine' if args.inta: __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)] __SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __SCREAMING_SNAKE_CASE : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: __a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase ) # start time __a : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) cuda.memcpy_dtoh_async(lowercase , 
lowercase , lowercase ) # Synchronize the stream and take time stream.synchronize() # end time __a : str = time.time() __a : Any = end_time - start_time __a : Optional[int] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names __SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0] __SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1] __SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length) def _snake_case ( lowercase ) -> Tuple: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __a : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __a : Optional[Any] = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __a : Dict = tokenized_examples.sequence_ids(lowercase ) __a : Optional[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __a : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __a : int = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'] # Validation Feature Creation __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __SCREAMING_SNAKE_CASE : List[Any] = default_data_collator __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any: # Post-processing: we match the start logits and end logits to answers in the original context. __a : List[str] = postprocess_qa_predictions( examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __a : List[str] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase , label_ids=lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _snake_case ( lowercase ) -> Optional[int]: return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize # Allocate device memory for inputs and outputs. __SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes) __SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __SCREAMING_SNAKE_CASE : Tuple = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0 __SCREAMING_SNAKE_CASE : str = 0 __SCREAMING_SNAKE_CASE : str = timeit.default_timer() __SCREAMING_SNAKE_CASE : Dict = None for step, batch in enumerate(eval_dataloader): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset)) __SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} 
ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds) __SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
697
0
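The inference loop above follows the standard pycuda pattern: pinned host output buffers, raw device allocations, and asynchronous copies serialized on a single stream. A reduced sketch of just that round trip, with a dummy 8-element input in place of real TensorRT bindings (requires a CUDA-capable GPU and pycuda; the execute_async call is elided since it needs a real engine):

import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda

stream = cuda.Stream()
h_input = np.arange(8, dtype=np.int32)                    # host input
h_output = cuda.pagelocked_empty((8,), dtype=np.float32)  # pinned host output
d_input = cuda.mem_alloc(h_input.nbytes)                  # device buffers
d_output = cuda.mem_alloc(h_output.nbytes)

cuda.memcpy_htod_async(d_input, h_input, stream)    # host -> device
# ... context.execute_async(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
cuda.memcpy_dtoh_async(h_output, d_output, stream)  # device -> host
stream.synchronize()                                # wait for all queued work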
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : Union[str, Any] = {'vocab_file': 'spiece.model'} __A : Union[str, Any] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __A : Any = { 't5-small': 5_1_2, 't5-base': 5_1_2, 't5-large': 5_1_2, 't5-3b': 5_1_2, 't5-11b': 5_1_2, } __A : Tuple = '▁' class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Tuple="<unk>" , __lowerCamelCase : int="<pad>" , __lowerCamelCase : Dict=100 , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Dict[str, Any]] = None , __lowerCamelCase : str=True , **__lowerCamelCase : Optional[Any] , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: SCREAMING_SNAKE_CASE = [f"<extra_id_{i}>" for i in range(__lowerCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens SCREAMING_SNAKE_CASE = len(set(filter(lambda __lowerCamelCase : bool("extra_id" in str(__lowerCamelCase ) ) , __lowerCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) if legacy: logger.warning_once( f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to" " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" ) SCREAMING_SNAKE_CASE = legacy SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , extra_ids=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=__lowerCamelCase , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = extra_ids SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @staticmethod def _snake_case ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: SCREAMING_SNAKE_CASE = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , __lowerCamelCase , ) return max_model_length @property def _snake_case ( self : List[Any] ): return self.sp_model.get_piece_size() + self._extra_ids def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__lowerCamelCase )) + [1] return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : Any ): return list( set(filter(lambda __lowerCamelCase : bool(re.search(r"<extra_id_\d+>" , __lowerCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def _snake_case ( self : Tuple ): return [self._convert_token_to_id(__lowerCamelCase ) for token in self.get_sentinel_tokens()] def _snake_case ( self : List[str] , __lowerCamelCase : List[int] ): if len(__lowerCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _snake_case ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = self._add_eos_if_not_present(__lowerCamelCase ) if token_ids_a is None: return token_ids_a else: SCREAMING_SNAKE_CASE = self._add_eos_if_not_present(__lowerCamelCase ) return token_ids_a + token_ids_a def __getstate__( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : Optional[int] , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self : Dict , __lowerCamelCase : "TextInput" , **__lowerCamelCase : List[str] ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: SCREAMING_SNAKE_CASE = SPIECE_UNDERLINE + text.replace(__lowerCamelCase , " " ) return super().tokenize(__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : Any , **__lowerCamelCase : str ): if not self.legacy: SCREAMING_SNAKE_CASE = text.startswith(__lowerCamelCase ) if is_first: SCREAMING_SNAKE_CASE = text[1:] SCREAMING_SNAKE_CASE = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(__lowerCamelCase ): SCREAMING_SNAKE_CASE = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] ): if token.startswith("<extra_id_" ): SCREAMING_SNAKE_CASE = re.match(r"<extra_id_(\d+)>" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(__lowerCamelCase ) def _snake_case ( self : int , __lowerCamelCase : Dict ): if index < self.sp_model.get_piece_size(): SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE = f"<extra_id_{self.vocab_size - 1 - index}>" return token def _snake_case ( self : Optional[int] , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = "" SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCamelCase ) + token SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def _snake_case ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + 
VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
698
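The <extra_id_N> handling above places the sentinel ids at the very top of the vocabulary in reverse order: the token-to-id conversion returns vocab_size - N - 1, and the id-to-token conversion inverts it. A small sketch of that arithmetic under the standard T5 setup of a 32,000-piece SentencePiece model plus 100 extra ids (sentinel_to_id is an illustrative name):

import re

sp_vocab_size = 32_000                   # pieces in the SentencePiece model
extra_ids = 100
vocab_size = sp_vocab_size + extra_ids   # 32_100, as for the released t5-* models

def sentinel_to_id(token: str) -> int:
    num = int(re.match(r"<extra_id_(\d+)>", token).group(1))
    return vocab_size - num - 1

assert sentinel_to_id("<extra_id_0>") == 32_099   # highest id in the vocab
assert sentinel_to_id("<extra_id_99>") == 32_000  # lowest id of the sentinel block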
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Optional[int] = logging.get_logger(__name__) __A : List[str] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Tuple = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Dict = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : Dict ): return len(self.encoder ) def _snake_case ( self : int ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : str , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : List[str] , __lowerCamelCase : str ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ): return 
self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
698
1
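The bpe method above repeatedly finds the best-ranked adjacent pair and fuses it until no ranked pair remains. A tiny walkthrough of one such step on the word "hello", using an equivalent set-of-bigrams formulation of get_pairs and assuming a merge table that ranks ("l", "l") first:

def get_pairs(word):
    # equivalent to the loop in the tokenizer: the set of adjacent symbol pairs
    return {(a, b) for a, b in zip(word, word[1:])}

word = tuple("hello")
assert get_pairs(word) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}

merged = ("h", "e", "ll", "o")  # after fusing the best-ranked pair ("l", "l")
assert get_pairs(merged) == {("h", "e"), ("e", "ll"), ("ll", "o")}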
__author__ = 'Tobias Carryer'

from time import time


class LinearCongruentialGenerator:
    '''simple docstring'''

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
698
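One worked step of the linear congruential recurrence X(n+1) = (a * X(n) + c) mod m with the demo constants above; note that 2 << 31 equals 2**32, so this is the classic 32-bit parameterization (seed pinned to 1 for a reproducible check):

a, c, m = 1_664_525, 1_013_904_223, 2 << 31   # m = 2**32 = 4_294_967_296
seed = 1
next_value = (a * seed + c) % m
assert next_value == 1_015_568_748            # 1_664_525 + 1_013_904_223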
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
698
1
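A worked check of the quadratic_roots demo call above: for 5x^2 + 6x + 1 the discriminant is b^2 - 4ac = 36 - 20 = 16, so the roots are (-6 +/- 4) / 10, i.e. -0.2 and -1.0, both real and therefore returned as plain floats:

assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)

# With a negative discriminant the roots stay complex: x^2 + x + 1 has
# b^2 - 4ac = -3, so both returned values keep a nonzero imaginary part.
root_1, root_2 = quadratic_roots(a=1, b=1, c=1)
assert root_1.imag != 0 and root_2.imag != 0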
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
    ),
}


class DPRConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
698
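A minimal usage sketch for the configuration class above; no weights are needed just to inspect or override the defaults (projection_dim=0 is DPR's convention for "no extra projection on top of the pooled output"):

config = DPRConfig()   # BERT-base-sized defaults
assert config.hidden_size == 768
assert config.projection_dim == 0

small = DPRConfig(num_hidden_layers=3, projection_dim=128)  # a reduced variant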
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCamelCase ): SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text
698
1
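A condensed sketch of the iterator-streamer pattern the tests above exercise: generation runs in a background thread while the main thread consumes decoded text as it arrives (the tiny test checkpoint is reused here; the prompt is arbitrary):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tok)
kwargs = {**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
thread = Thread(target=model.generate, kwargs=kwargs)
thread.start()
text = "".join(streamer)   # iterating blocks until generation finishes
thread.join()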
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
698
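With the custom checker above installed, a doctest example marked with the registered flag passes regardless of what it actually prints; a hypothetical illustration:

def add(a, b):
    """
    >>> add(1, 2)  # doctest: +IGNORE_RESULT
    'any expected output here is accepted'
    """
    return a + b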
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_fnet'] = [
        'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FNetForMaskedLM',
        'FNetForMultipleChoice',
        'FNetForNextSentencePrediction',
        'FNetForPreTraining',
        'FNetForQuestionAnswering',
        'FNetForSequenceClassification',
        'FNetForTokenClassification',
        'FNetLayer',
        'FNetModel',
        'FNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
1
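A short usage note on what the lazy structure above buys: importing the package stays cheap, and each submodule is loaded only when one of its exported names is first accessed, so the torch-dependent modeling module is deferred (and raises a clear error if torch is missing):

import transformers

config = transformers.FNetConfig()   # resolves configuration_fnet on first access
model_cls = transformers.FNetModel   # first access here triggers the modeling_fnet import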
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A : List[Any] = logging.get_logger(__name__) def __a ( A__ : Tuple ): if isinstance(A__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(A__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(A__ ): return [[videos]] raise ValueError(F"Could not make batched video from {videos}" ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = ["pixel_values"] def __init__( self : str , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : List[str] , ): super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 224} SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 224, "width": 224} SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , param_name="crop_size" ) SCREAMING_SNAKE_CASE = do_resize SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = do_center_crop SCREAMING_SNAKE_CASE = crop_size SCREAMING_SNAKE_CASE = resample SCREAMING_SNAKE_CASE = do_rescale SCREAMING_SNAKE_CASE = rescale_factor SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD def _snake_case ( self : List[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) if "shortest_edge" in size: SCREAMING_SNAKE_CASE = get_resize_output_image_size(__lowerCamelCase , size["shortest_edge"] , default_to_square=__lowerCamelCase ) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE = (size["height"], size["width"]) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}" ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ): SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" ) return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : List[Any] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : List[str] , ): return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : str , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Dict , ): return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : int , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE = to_numpy_array(__lowerCamelCase ) if do_resize: SCREAMING_SNAKE_CASE = self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) if do_center_crop: SCREAMING_SNAKE_CASE = self.center_crop(__lowerCamelCase , size=__lowerCamelCase ) if do_rescale: SCREAMING_SNAKE_CASE = self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) if do_normalize: SCREAMING_SNAKE_CASE = self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) SCREAMING_SNAKE_CASE = to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) return image def _snake_case ( self : List[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : List[str] , ): SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE = size if size is not None else self.size SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase ) SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE = get_size_dict(__lowerCamelCase , param_name="crop_size" ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) SCREAMING_SNAKE_CASE = make_batched(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [ [ self._preprocess_image( image=__lowerCamelCase , do_resize=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , do_center_crop=__lowerCamelCase , crop_size=__lowerCamelCase , do_rescale=__lowerCamelCase , rescale_factor=__lowerCamelCase , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , data_format=__lowerCamelCase , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE = {"pixel_values": videos} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
698
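A sketch of how the video preprocessor above is driven. Its class name is obfuscated in this dump, so VideoImageProcessor below is a stand-in, and the printed shape assumes the default resize and center-crop settings.

import numpy as np

processor = VideoImageProcessor()  # stand-in name for the class defined above

# A video is a list of H x W x C uint8 frames; make_batched() wraps it into
# the list-of-videos structure that the preprocess loop iterates over.
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224) with the defaults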
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    # Straight-line distance between two points, used as the KNN metric.
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    # Pair every training sample with its label.
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
698
1
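A hand-checkable call of the classifier above on a toy 2-D dataset (a sketch; it reuses the classifier function exactly as restored above):

train_points = [(0.0, 0.0), (0.1, 0.2), (5.0, 5.0), (5.2, 4.9)]
train_labels = [0, 0, 1, 1]
label_names = ["near_origin", "far_from_origin"]

# Distances from (4, 4): 5.66, 5.42, 1.41, 1.50 -- so with k=3 the votes are
# [1, 1, 0] and the majority class is 1.
print(classifier(train_points, train_labels, label_names, (4.0, 4.0), k=3))
# far_from_origin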
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    # Remember the starting point so an already-prime input is skipped below.
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
698
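A few deterministic calls that pin down the semantics of the two functions above, including the slightly surprising "skip the starting prime" rule:

assert is_prime(2) and is_prime(97)
assert not is_prime(1) and not is_prime(100)

assert next_prime(10) == 11            # scan upward from 10
assert next_prime(13) == 17            # 13 is prime, so it is skipped entirely
assert next_prime(4, factor=2) == 11   # scan starts at 2 * 4 = 8
assert next_prime(10, desc=True) == 7  # desc=True scans downward instead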
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
698
1
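A short instantiation sketch for the config above; the printed values assume the stock get_aligned_output_features_output_indices behaviour from transformers:

config = ResNetConfig(
    depths=[2, 2], hidden_sizes=[64, 128], layer_type="basic", out_features=["stage2"]
)
print(config.stage_names)   # ['stem', 'stage1', 'stage2']
print(config.out_features)  # ['stage2']
print(config.out_indices)   # [2] -- aligned with stage_names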
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    __A : Tuple = [0, 1, 2, 3, 4, 5, 6, 7]
    __A : Union[str, Any] = ['python', 'says', 'hello', '!']
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
698
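Note that the routine above swaps two independently chosen indices, which is a random-transposition shuffle rather than the textbook Fisher-Yates loop. For comparison, a sketch of the classic in-place version, which yields every permutation with equal probability:

import random


def textbook_fisher_yates(data: list) -> list:
    # Walk from the end; swap position i with a uniformly chosen j in [0, i].
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


print(textbook_fisher_yates([0, 1, 2, 3, 4, 5, 6, 7]))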
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/imagegpt-small': '',
    'openai/imagegpt-medium': '',
    'openai/imagegpt-large': '',
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
698
1
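An instantiation sketch for the config above; the attribute_map means the generic transformers names resolve to the GPT-2-style fields:

config = ImageGPTConfig()
print(config.vocab_size)               # 513 -- 512 clusters + 1 sos token
print(config.hidden_size)              # 512, routed to n_embd via attribute_map
print(config.max_position_embeddings)  # 1024, routed to n_positions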
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
698
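A usage sketch for the config above (the backbone string is whatever timm accepts; "resnet18" is just an example):

config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
print(config.backbone, config.out_indices)  # resnet18 (1, 2, 3, 4)
print(TimmBackboneConfig().out_indices)     # (-1,): last feature map by default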
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @slow @require_torch def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" ) SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size SCREAMING_SNAKE_CASE = tokenizer.sep_token_id SCREAMING_SNAKE_CASE = tokenizer.cls_token_id SCREAMING_SNAKE_CASE = 128 SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) ) SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) ) SCREAMING_SNAKE_CASE = 4 def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ): # Tokenizer will automatically set [BOS] <text> [EOS] SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 ) SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 ) SCREAMING_SNAKE_CASE = inputs.input_ids SCREAMING_SNAKE_CASE = inputs.attention_mask SCREAMING_SNAKE_CASE = outputs.input_ids SCREAMING_SNAKE_CASE = outputs.input_ids.copy() SCREAMING_SNAKE_CASE = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] SCREAMING_SNAKE_CASE = outputs.attention_mask assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids ) assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = pred.label_ids SCREAMING_SNAKE_CASE = pred.predictions # all unnecessary tokens are removed SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase ) return {"accuracy": accuracy} # map train dataset SCREAMING_SNAKE_CASE = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset SCREAMING_SNAKE_CASE = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments( output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , 
warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer SCREAMING_SNAKE_CASE = SeqaSeqTrainer( model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , ) # start training trainer.train()
698
1
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
698
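The key relation above is e_2 = (e_1^d)^(-1) mod p, so decryption simply multiplies the e_1^(dk) factor back in. A toy end-to-end round trip, for illustration only: the numbers are tiny, and Python's pow(x, -1, p) stands in for cryptomath.find_mod_inverse.

import random

p = 467                  # small prime standing in for generate_large_prime()
e_1 = 2                  # a primitive root modulo 467
d = 127                  # private exponent
e_2 = pow(e_1, -d, p)    # == find_mod_inverse(pow(e_1, d, p), p)

m = 331                                 # message as a residue mod p
k = random.randrange(2, p)              # fresh ephemeral key per message
c_1 = pow(e_1, k, p)
c_2 = (m * pow(e_2, k, p)) % p

# Decrypt: c_2 * c_1^d = m * e_1^(-dk) * e_1^(dk) = m (mod p)
assert (c_2 * pow(c_1, d, p)) % p == m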
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , "num_attention_heads" ) ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = kernel_size SCREAMING_SNAKE_CASE = stride SCREAMING_SNAKE_CASE = padding SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = initializer_range def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : List[str] ): return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , 
drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowerCamelCase__ = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = LevitModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : Any ): return @unittest.skip(reason="Levit does not use inputs_embeds" ) def _snake_case ( self : Union[str, Any] ): pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def _snake_case ( self : Tuple ): pass @unittest.skip(reason="Levit does not output attentions" ) def _snake_case ( self : Tuple ): pass def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) SCREAMING_SNAKE_CASE = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _snake_case ( self : int ): pass def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ): SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def _snake_case ( self : List[Any] ): if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ): SCREAMING_SNAKE_CASE = problem_type["title"] SCREAMING_SNAKE_CASE = problem_type["num_labels"] SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] 
) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def _snake_case ( self : List[Any] ): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Dict ): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
698
1
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(A__ ) SCREAMING_SNAKE_CASE = flatten_dict(A__ ) return flax_params def __a ( A__ : Union[str, Any] ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } SCREAMING_SNAKE_CASE = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key SCREAMING_SNAKE_CASE = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): SCREAMING_SNAKE_CASE = new_key.replace(A__ , A__ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): SCREAMING_SNAKE_CASE = new_key.replace(A__ , A__ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , A__ ) SCREAMING_SNAKE_CASE = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , A__ ) SCREAMING_SNAKE_CASE = flax_dict[key] SCREAMING_SNAKE_CASE = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key].T ) else: SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __a ( A__ : Any , A__ : List[Any] , A__ : Any=False , A__ : Tuple=False ): SCREAMING_SNAKE_CASE = get_flax_param(A__ ) if not use_large: SCREAMING_SNAKE_CASE = PixaStructVisionConfig() SCREAMING_SNAKE_CASE = PixaStructTextConfig() else: SCREAMING_SNAKE_CASE = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) SCREAMING_SNAKE_CASE = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=A__ ) SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(A__ ) SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(A__ ) model.load_state_dict(A__ ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) SCREAMING_SNAKE_CASE = 
PixaStructImageProcessor() SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=A__ , tokenizer=A__ ) if use_large: SCREAMING_SNAKE_CASE = 4096 SCREAMING_SNAKE_CASE = True # mkdir if needed os.makedirs(A__ , exist_ok=A__ ) model.save_pretrained(A__ ) processor.save_pretrained(A__ ) print("Model saved in {}".format(A__ ) ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--use_large', action='store_true', help='Use large model.') parser.add_argument('--is_vqa', action='store_true', help='Use large model.') __A : int = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
698
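The trickiest rename in the conversion above is the layer-number regex; isolated, with made-up T5X-style keys, it behaves like this:

import re

for key in ["encoder.layers_0.attention.query.kernel", "decoder.layers_11.mlp.wi_0.kernel"]:
    renamed = re.sub(r"layers_(\d+)", r"layer.\1", key)
    if "decoder" not in renamed:
        renamed = renamed.replace("encoder", "encoder.encoder")
    print(renamed)
# encoder.encoder.layer.0.attention.query.kernel
# decoder.layer.11.mlp.wi_0.kernel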
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): return None class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ): return None class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : Optional[int] ): from transformers import BertModel SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(__lowerCamelCase ) ) vocab_file.flush() SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) ) model.save_pretrained(__lowerCamelCase ) self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase ) @require_tf @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ): try: # Compute path with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) return path except Exception as e: 
self.fail(__lowerCamelCase ) @require_torch @require_tokenizers @slow def _snake_case ( self : Dict ): from transformers import BertModel SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" ) @require_tf @require_tokenizers @slow def _snake_case ( self : int ): from transformers import TFBertModel SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" ) def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase ) # Assert all variables are present self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase ) self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"] SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__lowerCamelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
698
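The last test above checks generate_identified_filename; a sketch of the behaviour it asserts, reimplemented with pathlib purely for illustration:

from pathlib import Path


def identified(path: Path, identifier: str) -> Path:
    # Insert the identifier between the stem and the suffix.
    return path.parent.joinpath(path.stem + identifier).with_suffix(path.suffix)


print(identified(Path("/home/something/my_fake_model.onnx"), "-test"))
# /home/something/my_fake_model-test.onnx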
1
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = (UnCLIPScheduler,) def _snake_case ( self : Union[str, Any] , **__lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = { "num_train_timesteps": 1000, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**__lowerCamelCase ) return config def _snake_case ( self : Any ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def _snake_case ( self : List[str] ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=__lowerCamelCase ) def _snake_case ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCamelCase ) def _snake_case ( self : str ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=__lowerCamelCase ) def _snake_case ( self : List[Any] ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config(variance_type="fixed_small_log" ) SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config(variance_type="learned_range" ) SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = 0.5 assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=__lowerCamelCase ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=__lowerCamelCase ) - -0.0_010_011 < 1e-5 def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config() SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = scheduler.timesteps SCREAMING_SNAKE_CASE = self.dummy_model() SCREAMING_SNAKE_CASE = self.dummy_sample_deter SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for i, t in enumerate(__lowerCamelCase ): # 1. predict noise residual SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ) # 2. 
predict previous mean of sample x_t-1 SCREAMING_SNAKE_CASE = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample SCREAMING_SNAKE_CASE = pred_prev_sample SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config() SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(25 ) SCREAMING_SNAKE_CASE = scheduler.timesteps SCREAMING_SNAKE_CASE = self.dummy_model() SCREAMING_SNAKE_CASE = self.dummy_sample_deter SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for i, t in enumerate(__lowerCamelCase ): # 1. predict noise residual SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ) if i + 1 == timesteps.shape[0]: SCREAMING_SNAKE_CASE = None else: SCREAMING_SNAKE_CASE = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 SCREAMING_SNAKE_CASE = scheduler.step( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample SCREAMING_SNAKE_CASE = pred_prev_sample SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def _snake_case ( self : Dict ): pass def _snake_case ( self : int ): pass
698
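Stripped of the assertions, the loops in the scheduler tests above follow the standard diffusers sampling pattern. A sketch, where model is a hypothetical noise predictor with the (sample, t) signature used in the tests:

import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 32, 32)  # start from pure noise
generator = torch.manual_seed(0)

for t in scheduler.timesteps:
    residual = model(sample, t)  # hypothetical noise-prediction model
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample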
from manim import * class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): rect.set_stroke(__lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 ) self.add(__lowerCamelCase ) cpu_targs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) SCREAMING_SNAKE_CASE = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) SCREAMING_SNAKE_CASE = MarkupText( f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) ) self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 ) target.move_to(__lowerCamelCase ) first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) ) self.play(*__lowerCamelCase ) self.play(*__lowerCamelCase ) self.wait()
698
1
import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) __A : Optional[int] = logging.getLogger(__name__) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=None ): SCREAMING_SNAKE_CASE = self.layer[current_layer](__lowerCamelCase , __lowerCamelCase , head_mask[current_layer] ) SCREAMING_SNAKE_CASE = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , __snake_case , ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : str , __lowerCamelCase : Any ): super().__init__(__lowerCamelCase ) SCREAMING_SNAKE_CASE = BertEncoderWithPabee(__lowerCamelCase ) self.init_weights() SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 def _snake_case ( self : List[Any] , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = threshold def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = patience def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.inference_layers_num / self.inference_instances_num SCREAMING_SNAKE_CASE = ( f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =" f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***" ) print(__lowerCamelCase ) @add_start_docstrings_to_model_forward(__lowerCamelCase ) def _snake_case ( self : List[str] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=False , ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: SCREAMING_SNAKE_CASE = input_ids.size() elif inputs_embeds is not None: SCREAMING_SNAKE_CASE = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) SCREAMING_SNAKE_CASE = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: SCREAMING_SNAKE_CASE = torch.ones(__lowerCamelCase , device=__lowerCamelCase ) if token_type_ids is None: SCREAMING_SNAKE_CASE = torch.zeros(__lowerCamelCase , dtype=torch.long , device=__lowerCamelCase ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
SCREAMING_SNAKE_CASE = self.get_extended_attention_mask(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = encoder_hidden_states.size() SCREAMING_SNAKE_CASE = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: SCREAMING_SNAKE_CASE = torch.ones(__lowerCamelCase , device=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.invert_attention_mask(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] SCREAMING_SNAKE_CASE = self.get_head_mask(__lowerCamelCase , self.config.num_hidden_layers ) SCREAMING_SNAKE_CASE = self.embeddings( input_ids=__lowerCamelCase , position_ids=__lowerCamelCase , token_type_ids=__lowerCamelCase , inputs_embeds=__lowerCamelCase ) SCREAMING_SNAKE_CASE = embedding_output if self.training: SCREAMING_SNAKE_CASE = [] for i in range(self.config.num_hidden_layers ): SCREAMING_SNAKE_CASE = self.encoder.adaptive_forward( __lowerCamelCase , current_layer=__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.pooler(__lowerCamelCase ) SCREAMING_SNAKE_CASE = output_layers[i](output_dropout(__lowerCamelCase ) ) res.append(__lowerCamelCase ) elif self.patience == 0: # Use all layers for inference SCREAMING_SNAKE_CASE = self.encoder( __lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = self.pooler(encoder_outputs[0] ) SCREAMING_SNAKE_CASE = [output_layers[self.config.num_hidden_layers - 1](__lowerCamelCase )] else: SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 SCREAMING_SNAKE_CASE = self.encoder.adaptive_forward( __lowerCamelCase , current_layer=__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.pooler(__lowerCamelCase ) SCREAMING_SNAKE_CASE = output_layers[i](__lowerCamelCase ) if regression: SCREAMING_SNAKE_CASE = logits.detach() if patient_result is not None: SCREAMING_SNAKE_CASE = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: SCREAMING_SNAKE_CASE = 0 else: SCREAMING_SNAKE_CASE = logits.detach().argmax(dim=1 ) if patient_result is not None: SCREAMING_SNAKE_CASE = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(__lowerCamelCase ) ): patient_counter += 1 else: SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = logits if patient_counter == self.patience: break SCREAMING_SNAKE_CASE = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
" , __snake_case , ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : Any ): super().__init__(__lowerCamelCase ) SCREAMING_SNAKE_CASE = config.num_labels SCREAMING_SNAKE_CASE = BertModelWithPabee(__lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.Dropout(config.hidden_dropout_prob ) SCREAMING_SNAKE_CASE = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , ): SCREAMING_SNAKE_CASE = self.bert( input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , position_ids=__lowerCamelCase , head_mask=__lowerCamelCase , inputs_embeds=__lowerCamelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) SCREAMING_SNAKE_CASE = (logits[-1],) if labels is not None: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = 0 for ix, logits_item in enumerate(__lowerCamelCase ): if self.num_labels == 1: # We are doing regression SCREAMING_SNAKE_CASE = MSELoss() SCREAMING_SNAKE_CASE = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: SCREAMING_SNAKE_CASE = CrossEntropyLoss() SCREAMING_SNAKE_CASE = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: SCREAMING_SNAKE_CASE = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 SCREAMING_SNAKE_CASE = (total_loss / total_weights,) + outputs return outputs
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ): pass @is_pipeline_test @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @require_torch def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCamelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @require_tf def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, 
"label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @slow @require_torch def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is :")
print(z)
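# Quick check on a small plain list (comparison counts vary with the random pivots).
data = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
print(data)         # [1, 1, 2, 3, 4, 5, 6, 9], sorted in place
print(comparisons)  # total comparisons performed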
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
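# Worked examples: the conversion is value * factor[from_type] / factor[to_type].
print(energy_conversion("kilowatthour", "joule", 1))              # 3600000.0
print(energy_conversion("joule", "kilocalorie_nutr", 4_186_800))  # 1.0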
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @slow @require_torch def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" ) SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size SCREAMING_SNAKE_CASE = tokenizer.sep_token_id SCREAMING_SNAKE_CASE = tokenizer.cls_token_id SCREAMING_SNAKE_CASE = 128 SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) ) SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) ) SCREAMING_SNAKE_CASE = 4 def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ): # Tokenizer will automatically set [BOS] <text> [EOS] SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 ) SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 ) SCREAMING_SNAKE_CASE = inputs.input_ids SCREAMING_SNAKE_CASE = inputs.attention_mask SCREAMING_SNAKE_CASE = outputs.input_ids SCREAMING_SNAKE_CASE = outputs.input_ids.copy() SCREAMING_SNAKE_CASE = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] SCREAMING_SNAKE_CASE = outputs.attention_mask assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids ) assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = pred.label_ids SCREAMING_SNAKE_CASE = pred.predictions # all unnecessary tokens are removed SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase ) return {"accuracy": accuracy} # map train dataset SCREAMING_SNAKE_CASE = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset SCREAMING_SNAKE_CASE = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments( output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , 
warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer SCREAMING_SNAKE_CASE = SeqaSeqTrainer( model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , ) # start training trainer.train()
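# A minimal sketch of the label masking applied in _map_to_encoder_decoder_inputs:
# pad token ids are replaced with -100 so the cross-entropy loss ignores them.
pad_token_id = 0  # BERT's [PAD] id, shown here as a plain constant
labels = [[101, 7592, 102, 0, 0], [101, 2088, 999, 102, 0]]
masked = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
print(masked)  # [[101, 7592, 102, -100, -100], [101, 2088, 999, 102, -100]]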
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
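# Toy illustration of the chaining idea, independent of the HashTable base
# class: each occupied bucket holds a deque and new entries go on the left.
from collections import deque

buckets = {0: None, 1: None}
key, data = 1, "value"
buckets[key] = deque([]) if buckets[key] is None else buckets[key]
buckets[key].appendleft(data)
print(buckets)  # {0: None, 1: deque(['value'])}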
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
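# Worked example: a 90-degree arc of radius 10 is a quarter circumference,
# 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.708.
print(arc_length(90, 10))  # 15.707963267948966
print(arc_length(360, 1))  # 6.283185307179586, the full unit circumference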
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : int = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "gpt_neo" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_layers SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = window_size SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_dropout SCREAMING_SNAKE_CASE = embed_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = attention_types SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) @staticmethod def _snake_case ( __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ): import torch SCREAMING_SNAKE_CASE = input.size() SCREAMING_SNAKE_CASE = len(A__ ) SCREAMING_SNAKE_CASE = shape[dimension] SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ ) SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1 SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE = [slice(A__ )] * rank SCREAMING_SNAKE_CASE = indices SCREAMING_SNAKE_CASE = input[s] SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A__ ) def __a ( A__ : Union[str, Any] , A__ : Optional[int] ): import torch SCREAMING_SNAKE_CASE = torch.arange(1 , A__ ) SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ ) SCREAMING_SNAKE_CASE = remainders == 0 SCREAMING_SNAKE_CASE = candidates[divisor_indices] SCREAMING_SNAKE_CASE = torch.max(A__ ) return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"} else: SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return common_inputs @property def _snake_case ( self : Optional[int] ): return self._config.num_heads def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE = seqlen + 2 SCREAMING_SNAKE_CASE = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE = common_inputs["attention_mask"] if self.use_past: SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self : Optional[int] ): return 13
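# What expand_attention_types_params does to the default value: the pattern
# [["global", "local"], 12] repeats the two-layer pattern 12 times, yielding
# one entry per hidden layer (24 for the default config).
pattern = [[["global", "local"], 12]]
attentions = []
for item in pattern:
    for _ in range(item[1]):
        attentions.extend(item[0])
print(len(attentions), attentions[:4])  # 24 ['global', 'local', 'global', 'local']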
import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput __A : str = 'scheduler_config.json' class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = 1 lowerCamelCase__ = 2 lowerCamelCase__ = 3 lowerCamelCase__ = 4 lowerCamelCase__ = 5 lowerCamelCase__ = 6 lowerCamelCase__ = 7 lowerCamelCase__ = 8 lowerCamelCase__ = 9 lowerCamelCase__ = 1_0 lowerCamelCase__ = 1_1 lowerCamelCase__ = 1_2 lowerCamelCase__ = 1_3 lowerCamelCase__ = 1_4 @dataclass class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = 42 class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = SCHEDULER_CONFIG_NAME lowerCamelCase__ = [] lowerCamelCase__ = True @classmethod def _snake_case ( cls : str , __lowerCamelCase : Dict[str, Any] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : List[str]=False , **__lowerCamelCase : int , ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.load_config( pretrained_model_name_or_path=__lowerCamelCase , subfolder=__lowerCamelCase , return_unused_kwargs=__lowerCamelCase , return_commit_hash=__lowerCamelCase , **__lowerCamelCase , ) return cls.from_config(__lowerCamelCase , return_unused_kwargs=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : Union[str, os.PathLike] , __lowerCamelCase : bool = False , **__lowerCamelCase : Optional[Any] ): self.save_config(save_directory=__lowerCamelCase , push_to_hub=__lowerCamelCase , **__lowerCamelCase ) @property def _snake_case ( self : Optional[Any] ): return self._get_compatibles() @classmethod def _snake_case ( cls : Optional[int] ): SCREAMING_SNAKE_CASE = list(set([cls.__name__] + cls._compatibles ) ) SCREAMING_SNAKE_CASE = importlib.import_module(__name__.split("." )[0] ) SCREAMING_SNAKE_CASE = [ getattr(__lowerCamelCase , __lowerCamelCase ) for c in compatible_classes_str if hasattr(__lowerCamelCase , __lowerCamelCase ) ] return compatible_classes
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
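# A minimal sketch of the lazy-import idea behind _LazyModule, using a
# module-level __getattr__ (PEP 562); this illustrates the pattern only and is
# not transformers' actual implementation.
import importlib

_structure = {"configuration_longt5": ["LongT5Config"]}


def __getattr__(name):
    for module_name, exported in _structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")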
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __A : List[str] = logging.get_logger(__name__) __A : List[str] = { 'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "perceiver" def __init__( self : int , __lowerCamelCase : Optional[Any]=256 , __lowerCamelCase : int=1280 , __lowerCamelCase : List[str]=768 , __lowerCamelCase : int=1 , __lowerCamelCase : Any=26 , __lowerCamelCase : Optional[Any]=8 , __lowerCamelCase : Any=8 , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]="kv" , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Dict=1e-12 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[Any]=262 , __lowerCamelCase : Optional[int]=2048 , __lowerCamelCase : int=56 , __lowerCamelCase : Union[str, Any]=[368, 496] , __lowerCamelCase : Tuple=16 , __lowerCamelCase : Tuple=1920 , __lowerCamelCase : Optional[int]=16 , __lowerCamelCase : List[Any]=[1, 16, 224, 224] , **__lowerCamelCase : Union[str, Any] , ): super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = num_latents SCREAMING_SNAKE_CASE = d_latents SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = num_blocks SCREAMING_SNAKE_CASE = num_self_attends_per_block SCREAMING_SNAKE_CASE = num_self_attention_heads SCREAMING_SNAKE_CASE = num_cross_attention_heads SCREAMING_SNAKE_CASE = qk_channels SCREAMING_SNAKE_CASE = v_channels SCREAMING_SNAKE_CASE = cross_attention_shape_for_attention SCREAMING_SNAKE_CASE = self_attention_widening_factor SCREAMING_SNAKE_CASE = cross_attention_widening_factor SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = use_query_residual # masked language modeling attributes SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings # image classification attributes SCREAMING_SNAKE_CASE = image_size # flow attributes SCREAMING_SNAKE_CASE = train_size # multimodal autoencoding attributes SCREAMING_SNAKE_CASE = num_frames SCREAMING_SNAKE_CASE = audio_samples_per_frame SCREAMING_SNAKE_CASE = samples_per_patch SCREAMING_SNAKE_CASE = output_shape class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : List[Any] ): if self.task == "multiple-choice": SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"} else: SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def _snake_case ( self : Union[str, Any] ): return 1e-4 def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : 
bool = False , __lowerCamelCase : Optional[TensorType] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 40 , __lowerCamelCase : int = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(__lowerCamelCase , __lowerCamelCase ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE = preprocessor.num_special_tokens_to_add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE = [" ".join(["a"] ) * seq_length] * batch_size SCREAMING_SNAKE_CASE = dict(preprocessor(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = inputs.pop("input_ids" ) return inputs elif isinstance(__lowerCamelCase , __lowerCamelCase ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch ) SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
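# Worked example: 100 V at +30 degrees times 5 A at -30 degrees gives
# 500 VA at 0 degrees, i.e. (500+0j) up to floating-point error.
print(apparent_power(100, 5, 30, -30))  # (500+0j)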
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
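# Worked example: d/dy y**3 at y = 2 is 3 * 2**2 = 12. Dual(2, 1) carries one
# epsilon term, (2 + E1)**3 = 8 + 12*E1 + 6*E2 + E3, and the first dual
# coefficient times 1! recovers the derivative.
print(differentiate(lambda y: y**3, 2, 1))  # 12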
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __a ( A__ : List[str] ): SCREAMING_SNAKE_CASE = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ ) SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def __a ( A__ : Tuple , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = {} for old_key in state_dict.keys(): SCREAMING_SNAKE_CASE = old_key if "moe_layer.experts." in key: if expert_idx is not None: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" ) else: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: SCREAMING_SNAKE_CASE = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: SCREAMING_SNAKE_CASE = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("final_layer_norm" , "ff_layer_norm" ) SCREAMING_SNAKE_CASE = state_dict[old_key] return new_dict def __a ( A__ : List[str] , A__ : List[Any] , A__ : str , A__ : Union[str, Any] , A__ : str = WEIGHTS_NAME ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 os.makedirs(A__ , exist_ok=A__ ) for expert in range(A__ ): SCREAMING_SNAKE_CASE = switch_checkpoint_path + F"-rank-{expert}.pt" if os.path.isfile(A__ ): SCREAMING_SNAKE_CASE = torch.load(A__ )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = os.path.join( A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) torch.save(A__ , A__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(A__ )[0]].dtype ) # Add the last block SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) SCREAMING_SNAKE_CASE = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(A__ ) == 1: SCREAMING_SNAKE_CASE = os.path.join(A__ , A__ ) torch.save(A__ , A__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(A__ , A__ ) # Otherwise, let's build the index SCREAMING_SNAKE_CASE = {} for idx, shard in enumerate(A__ ): SCREAMING_SNAKE_CASE = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(A__ ):05d}.bin" ) SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , 
F"-{idx+1:05d}-of-???.bin" ) ) os.rename(A__ , os.path.join(A__ , A__ ) ) for key in shard: SCREAMING_SNAKE_CASE = shard_file # Add the metadata SCREAMING_SNAKE_CASE = {"total_size": total_size} SCREAMING_SNAKE_CASE = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE = json.dumps(A__ , indent=2 , sort_keys=A__ ) + "\n" f.write(A__ ) return metadata, index if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) __A : Optional[int] = parser.parse_args() __A , __A : Union[str, Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) __A : Any = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) __A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = BlipImageProcessor() SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Dict , **__lowerCamelCase : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def _snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" ) SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = 
BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __A : Union[str, Any] = 'true' def __a ( A__ : List[str] , A__ : Dict=82 , A__ : int=16 ): set_seed(42 ) SCREAMING_SNAKE_CASE = RegressionModel() SCREAMING_SNAKE_CASE = deepcopy(A__ ) SCREAMING_SNAKE_CASE = RegressionDataset(length=A__ ) SCREAMING_SNAKE_CASE = DataLoader(A__ , batch_size=A__ ) model.to(accelerator.device ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(A__ , A__ ) return model, ddp_model, dataloader def __a ( A__ : Accelerator , A__ : Union[str, Any]=False ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(A__ : List[Any] ): SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A__ , max_length=A__ ) return outputs with accelerator.main_process_first(): SCREAMING_SNAKE_CASE = dataset.map( A__ , batched=A__ , remove_columns=["idx", "sentence1", "sentence2"] , ) SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A__ : int ): if use_longest: return tokenizer.pad(A__ , padding="longest" , return_tensors="pt" ) return tokenizer.pad(A__ , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(A__ , shuffle=A__ , collate_fn=A__ , batch_size=16 ) def __a ( A__ : Any , A__ : str ): SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=A__ , split_batches=A__ ) SCREAMING_SNAKE_CASE = get_dataloader(A__ , not dispatch_batches ) SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(A__ , A__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __a ( A__ : Optional[Any] , A__ : Union[str, Any] , A__ : Dict ): SCREAMING_SNAKE_CASE = [] for batch in dataloader: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [] for logit, targ in logits_and_targets: logits.append(A__ ) targs.append(A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(A__ ), torch.cat(A__ ) return logits, targs def __a ( A__ : Accelerator , A__ : List[Any]=82 , A__ : Tuple=False , A__ : Union[str, Any]=False , A__ : Any=16 ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(A__ , A__ , A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(A__ , A__ , A__ ) assert ( len(A__ ) == num_samples ), F"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A__ )}" def __a ( A__ : bool = False , A__ : bool = False ): SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(A__ , A__ ) # First do baseline SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE = setup["no"] model.to(A__ ) model.eval() for batch in dataloader: batch.to(A__ ) with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**A__ ) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=A__ , references=batch["labels"] ) SCREAMING_SNAKE_CASE = metric.compute() # Then do distributed SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**A__ ) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE = batch["labels"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=A__ , references=A__ ) SCREAMING_SNAKE_CASE = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def __a ( ): SCREAMING_SNAKE_CASE = Accelerator(split_batches=A__ , dispatch_batches=A__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(A__ , A__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: SCREAMING_SNAKE_CASE = Accelerator(split_batches=A__ , dispatch_batches=A__ ) if accelerator.is_local_main_process: print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(A__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) SCREAMING_SNAKE_CASE = Accelerator() test_torch_metrics(A__ , 512 ) accelerator.state._reset_state() def __a ( A__ : List[str] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
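# A minimal, self-contained sketch of the gathering problem these tests
# target: with 2 processes and 5 samples, each process is padded to 3 samples
# so every rank does equal work, and a plain all-gather yields 6 predictions;
# gather_for_metrics-style logic slices the result back to the true length.
num_processes, dataset_len = 2, 5
per_process = [[0, 1, 2], [3, 4, 4]]  # last sample duplicated as padding
gathered = [x for shard in per_process for x in shard]
print(gathered[:dataset_len])  # [0, 1, 2, 3, 4] - padded duplicate dropped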
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ): pass def __a ( A__ : str ): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A : Tuple = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) SCREAMING_SNAKE_CASE = "What is the placebo?" SCREAMING_SNAKE_CASE = [ { "image": load_image(__lowerCamelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 ) self.assertEqual( __lowerCamelCase , [ [ {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "How many cats are there?" SCREAMING_SNAKE_CASE = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# An empty answer is expected in that case SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) # We can optionally pass the words and bounding boxes directly SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _snake_case ( self : List[Any] ): pass
698
1
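The row above exercises `Accelerator.gather_for_metrics`. The following is a minimal sketch of the same evaluation pattern, assuming a generic `model` and `dataloader` plus the `evaluate` library; none of these names come from the row itself:

import torch
import evaluate
from accelerate import Accelerator

def distributed_eval(model, dataloader):
    # prepare() shards the dataloader across processes and wraps the model.
    accelerator = Accelerator()
    model, dataloader = accelerator.prepare(model, dataloader)
    metric = evaluate.load("glue", "mrpc")
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the duplicated samples that padding of the
        # last batch would otherwise introduce on some processes.
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()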
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
698
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "facebook/bart-large-mnli" lowerCamelCase__ = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) lowerCamelCase__ = "text_classifier" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSequenceClassification lowerCamelCase__ = ["text", ["text"]] lowerCamelCase__ = ["text"] def _snake_case ( self : Optional[Any] ): super().setup() SCREAMING_SNAKE_CASE = self.model.config SCREAMING_SNAKE_CASE = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("entail" ): SCREAMING_SNAKE_CASE = int(__lowerCamelCase ) if self.entailment_id == -1: raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." ) def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = labels return self.pre_processor( [text] * len(__lowerCamelCase ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , ) def _snake_case ( self : str , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = outputs.logits SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
698
1
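The text-classification tool in the row above implements zero-shot classification through an NLI model: each candidate label is turned into a "This example is <label>" hypothesis, and the label whose hypothesis scores highest on entailment wins. A sketch of the same trick with the plain transformers API, assuming the `facebook/bart-large-mnli` checkpoint and its usual entailment label (both assumptions, not taken from the row):

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")

text = "The new GPU doubles training throughput."
labels = ["hardware", "cooking", "politics"]
# Pair the text with one hypothesis per candidate label, as the tool's encode step does.
inputs = tokenizer(
    [text] * len(labels),
    [f"This example is {label}" for label in labels],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(**inputs).logits
# Pick the label whose hypothesis received the highest entailment score.
entailment_id = int(model.config.label2id.get("entailment", 2))
print(labels[logits[:, entailment_id].argmax().item()])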
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 SCREAMING_SNAKE_CASE = test_metrics @require_cpu def _snake_case ( self : int ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def _snake_case ( self : Tuple ): debug_launcher(self.test_metrics.main ) @require_single_gpu def _snake_case ( self : str ): self.test_metrics.main() @require_multi_gpu def _snake_case ( self : int ): print(f"Found {torch.cuda.device_count()} devices." ) SCREAMING_SNAKE_CASE = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
698
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __a ( A__ : str=None ): if subparsers is not None: SCREAMING_SNAKE_CASE = subparsers.add_parser("test" ) else: SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=A__ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: SCREAMING_SNAKE_CASE = script_name else: SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}" SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split() SCREAMING_SNAKE_CASE = execute_subprocess_async(A__ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def __a ( ): SCREAMING_SNAKE_CASE = test_command_parser() SCREAMING_SNAKE_CASE = parser.parse_args() test_command(A__ ) if __name__ == "__main__": main()
698
1
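The row above drives its tests through `debug_launcher`, which runs a function under a multi-process CPU backend without spawning a separate script. A minimal sketch, with a trivial hypothetical target function:

from accelerate import Accelerator, debug_launcher

def training_check():
    # Each spawned process sees its own rank through the shared Accelerator state.
    accelerator = Accelerator()
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")

# Spawns two CPU processes and runs training_check in each of them.
debug_launcher(training_check, num_processes=2)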
__A : dict[str, float] = { "joule": 1.0, "kilojoule": 1_0_0_0, "megajoule": 1_0_0_0_0_0_0, "gigajoule": 1_0_0_0_0_0_0_0_0_0, "wattsecond": 1.0, "watthour": 3_6_0_0, "kilowatthour": 3_6_0_0_0_0_0, "newtonmeter": 1.0, "calorie_nutr": 4_1_8_6.8, "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0, "electronvolt": 1.6_0217_6634e-19, "britishthermalunit_it": 1_0_5_5.0_5_5_8_5, "footpound": 1.355_818, } def __a ( A__ : str , A__ : str , A__ : float ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: SCREAMING_SNAKE_CASE = ( F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" F"Valid values are: {', '.join(A__ )}" ) raise ValueError(A__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
698
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Optional[int] = logging.get_logger(__name__) __A : List[str] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Tuple = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Dict = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : Dict ): return len(self.encoder ) def _snake_case ( self : int ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : str , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : List[str] , __lowerCamelCase : str ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ): return 
self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
698
1
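A quick worked check of the unit converter in the row above: the formula is value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]. The snippet below re-states it under the hypothetical readable name `energy_conversion` (the row's own converter is obfuscated as `__a`) with a trimmed table:

ENERGY = {"joule": 1.0, "kilojoule": 1_000, "kilowatthour": 3_600_000, "wattsecond": 1.0}

def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    # Normalise to joules, then rescale into the target unit.
    return value * ENERGY[from_type] / ENERGY[to_type]

assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0  # 1 * 3_600_000 / 1
assert energy_conversion("joule", "kilojoule", 500) == 0.5           # 500 * 1 / 1_000
assert energy_conversion("wattsecond", "joule", 42) == 42.0          # the two units coincide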
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __A : List[str] = 'CompVis/stable-diffusion-v1-1' __A : Tuple = 'CompVis/stable-diffusion-v1-2' __A : List[Any] = 'CompVis/stable-diffusion-v1-3' __A : str = 'CompVis/stable-diffusion-v1-4' class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : List[str] , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : CLIPTextModel , __lowerCamelCase : CLIPTokenizer , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCamelCase : StableDiffusionSafetyChecker , __lowerCamelCase : CLIPImageProcessor , __lowerCamelCase : bool = True , ): super().__init__() SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = StableDiffusionPipeline( vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , requires_safety_checker=__lowerCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _snake_case ( self : Union[str, Any] ): return {k: getattr(self , __lowerCamelCase ) for k in self.config.keys() if not k.startswith("_" )} def _snake_case ( self : Any , __lowerCamelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowerCamelCase ) def _snake_case ( self : Any ): self.enable_attention_slicing(__lowerCamelCase ) @torch.no_grad() def _snake_case ( self : str , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Optional[Any] , ): return self.pipea( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : Dict
, __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : List[Any] , ): return self.pipea( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : str , ): return self.pipea( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Any , ): return self.pipea( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) @torch.no_grad() def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, List[str]] , __lowerCamelCase : int = 512 , 
__lowerCamelCase : int = 512 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : List[Any] , ): SCREAMING_SNAKE_CASE = "cuda" if torch.cuda.is_available() else "cpu" self.to(__lowerCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." ) # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=__lowerCamelCase , height=__lowerCamelCase , width=__lowerCamelCase , num_inference_steps=__lowerCamelCase , guidance_scale=__lowerCamelCase , negative_prompt=__lowerCamelCase , num_images_per_prompt=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , latents=__lowerCamelCase , output_type=__lowerCamelCase , return_dict=__lowerCamelCase , callback=__lowerCamelCase , callback_steps=__lowerCamelCase , **__lowerCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
698
from __future__ import annotations from cmath import sqrt def __a ( A__ : int , A__ : int , A__ : int ): if a == 0: raise ValueError("Coefficient 'a' must not be zero." ) SCREAMING_SNAKE_CASE = b * b - 4 * a * c SCREAMING_SNAKE_CASE = (-b + sqrt(A__ )) / (2 * a) SCREAMING_SNAKE_CASE = (-b - sqrt(A__ )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def __a ( ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 ) print(F"The solutions are: {solutiona} and {solutiona}" ) if __name__ == "__main__": main()
698
1
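A worked check of the quadratic solver in the row above (restated under the hypothetical readable name `quadratic_roots`; the row's own function is obfuscated as `__a`): for x^2 - 3x + 2 the discriminant is b^2 - 4ac = 9 - 8 = 1, so the roots are (3 ± 1) / 2:

from cmath import sqrt

def quadratic_roots(a: float, b: float, c: float):
    # (-b ± sqrt(b^2 - 4ac)) / 2a, keeping complex roots when the discriminant is negative.
    delta = b * b - 4 * a * c
    return (-b + sqrt(delta)) / (2 * a), (-b - sqrt(delta)) / (2 * a)

root_1, root_2 = quadratic_roots(1, -3, 2)
assert {root_1, root_2} == {2, 1}
# A negative discriminant yields a conjugate pair: x^2 + 1 = 0 -> ±1j.
root_1, root_2 = quadratic_roots(1, 0, 1)
assert root_1 == 1j and root_2 == -1j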
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' @register_to_config def __init__( self : int , *, __lowerCamelCase : int = 4 , __lowerCamelCase : int = 768 , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , ): super().__init__() SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(__lowerCamelCase ) ) # parameters for additional clip time embeddings SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , __lowerCamelCase ) # parameters for encoder hidden states SCREAMING_SNAKE_CASE = clip_extra_context_tokens SCREAMING_SNAKE_CASE = nn.Linear( __lowerCamelCase , self.clip_extra_context_tokens * cross_attention_dim ) SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.LayerNorm(__lowerCamelCase ) def _snake_case ( self : Dict , *, __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings SCREAMING_SNAKE_CASE = image_embeddings.shape[0] SCREAMING_SNAKE_CASE = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) SCREAMING_SNAKE_CASE = classifier_free_guidance_embeddings.expand( __lowerCamelCase , -1 ) SCREAMING_SNAKE_CASE = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] SCREAMING_SNAKE_CASE = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... SCREAMING_SNAKE_CASE = self.embedding_proj(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.clip_image_embeddings_project_to_time_embeddings(__lowerCamelCase ) SCREAMING_SNAKE_CASE = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" SCREAMING_SNAKE_CASE = self.clip_extra_context_tokens_proj(__lowerCamelCase ) SCREAMING_SNAKE_CASE = clip_extra_context_tokens.reshape(__lowerCamelCase , -1 , self.clip_extra_context_tokens ) SCREAMING_SNAKE_CASE = clip_extra_context_tokens.permute(0 , 2 , 1 ) SCREAMING_SNAKE_CASE = self.encoder_hidden_states_proj(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.text_encoder_hidden_states_norm(__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
698
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCamelCase ): SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text
698
1
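The streamer tests in the row above center on `TextIteratorStreamer`, which feeds generated text back to the calling thread while `generate()` runs elsewhere. A minimal sketch, assuming the tiny test checkpoint used in those tests is available:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
# generate() blocks, so it runs in a worker thread while we consume the stream here.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="")
thread.join()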
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __A : Union[str, Any] = logging.get_logger(__name__) __A : int = 'https://openaipublic.azureedge.net/jukebox/models/' __A : Optional[Any] = { 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def __a ( A__ : int ): if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.1.bias" , ".conv1d_1.bias" ) elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.1.weight" , ".conv1d_1.weight" ) elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.3.bias" , ".conv1d_2.bias" ) elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10: SCREAMING_SNAKE_CASE = key.replace(".model.3.weight" , ".conv1d_2.weight" ) if "conditioner_blocks.0." in key: SCREAMING_SNAKE_CASE = key.replace("conditioner_blocks.0" , "conditioner_blocks" ) if "prime_prior" in key: SCREAMING_SNAKE_CASE = key.replace("prime_prior" , "encoder" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: SCREAMING_SNAKE_CASE = key.replace(".emb." , "." ) if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(".k" , ".codebook" ) if "y_emb." in key: return key.replace("y_emb." , "metadata_embedding." ) if "x_emb.emb." in key: SCREAMING_SNAKE_CASE = key.replace("0.x_emb.emb" , "embed_tokens" ) if "prime_state_ln" in key: return key.replace("prime_state_ln" , "encoder.final_layer_norm" ) if ".ln" in key: return key.replace(".ln" , ".layer_norm" ) if "_ln" in key: return key.replace("_ln" , "_layer_norm" ) if "prime_state_proj" in key: return key.replace("prime_state_proj" , "encoder.proj_in" ) if "prime_x_out" in key: return key.replace("prime_x_out" , "encoder.lm_head" ) if "prior.x_out" in key: return key.replace("x_out" , "fc_proj_out" ) if "x_emb" in key: return key.replace("x_emb" , "embed_tokens" ) return key def __a ( A__ : Optional[Any] , A__ : Dict , A__ : Tuple , A__ : Optional[Any] ): SCREAMING_SNAKE_CASE = {} import re SCREAMING_SNAKE_CASE = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile( R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile( R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile( R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) SCREAMING_SNAKE_CASE = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys 
if re_encoder_block_conv_in.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) SCREAMING_SNAKE_CASE = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.sub(A__ , A__ ) elif re_encoder_block_resnet.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_encoder_block_resnet.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]] SCREAMING_SNAKE_CASE = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}." SCREAMING_SNAKE_CASE = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_encoder_block_resnet.sub(A__ , A__ ) elif re_encoder_block_proj_out.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}" SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.sub(A__ , A__ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 SCREAMING_SNAKE_CASE = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.sub(A__ , A__ ) elif re_decoder_block_resnet.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_decoder_block_resnet.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]] SCREAMING_SNAKE_CASE = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}." SCREAMING_SNAKE_CASE = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_decoder_block_resnet.sub(A__ , A__ ) elif re_decoder_block_proj_in.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}" SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.sub(A__ , A__ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 SCREAMING_SNAKE_CASE = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.sub(A__ , A__ ) elif re_prior_cond_resnet.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_prior_cond_resnet.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 SCREAMING_SNAKE_CASE = {"1": 1, "3": 2}[groups[-2]] SCREAMING_SNAKE_CASE = F"conditioner_blocks.upsampler.upsample_block.{block_index}." 
SCREAMING_SNAKE_CASE = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" SCREAMING_SNAKE_CASE = prefix + resnet_block SCREAMING_SNAKE_CASE = re_prior_cond_resnet.sub(A__ , A__ ) elif re_prior_cond_proj_in.fullmatch(A__ ): SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.match(A__ ) SCREAMING_SNAKE_CASE = regex_match.groups() SCREAMING_SNAKE_CASE = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}" SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.sub(A__ , A__ ) # keep original key else: SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = replace_key(A__ ) if F"{key_prefix}.{key}" not in model_state_dict or key is None: print(F"failed converting {original_key} to {key}, does not match" ) # handle missmatched shape elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape: SCREAMING_SNAKE_CASE = model_state_dict[F"{key_prefix}.{key}"] print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" ) SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = original_key SCREAMING_SNAKE_CASE = value return new_dict @torch.no_grad() def __a ( A__ : Any=None , A__ : Optional[int]=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ): SCREAMING_SNAKE_CASE = requests.get(F"{PREFIX}{file}" , allow_redirects=A__ ) os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=A__ ) open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content ) SCREAMING_SNAKE_CASE = MODEL_MAPPING[model_name.split("/" )[-1]] SCREAMING_SNAKE_CASE = JukeboxConfig.from_pretrained(A__ ) SCREAMING_SNAKE_CASE = JukeboxModel(A__ ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = {} for i, dict_name in enumerate(A__ ): SCREAMING_SNAKE_CASE = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"] SCREAMING_SNAKE_CASE = {} for k in old_dic.keys(): if k.endswith(".b" ): SCREAMING_SNAKE_CASE = old_dic[k] elif k.endswith(".w" ): SCREAMING_SNAKE_CASE = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: SCREAMING_SNAKE_CASE = old_dic[k] else: SCREAMING_SNAKE_CASE = old_dic[k] SCREAMING_SNAKE_CASE = "vqvae" if i == 0 else F"priors.{3 - i}" SCREAMING_SNAKE_CASE = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ ) weight_dict.append(A__ ) SCREAMING_SNAKE_CASE = weight_dict.pop(0 ) model.vqvae.load_state_dict(A__ ) for i in range(len(A__ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(A__ ).mkdir(exist_ok=A__ ) with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile: json.dump(A__ , A__ ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(A__ ) return weight_dict if __name__ == "__main__": __A : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) __A : Dict = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
698
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
1
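The Jukebox converter above renames checkpoint keys with a battery of compiled regexes: fullmatch a pattern, pull out the captured indices, rebuild the new name, and keep unmatched keys as-is. A stripped-down sketch of that pattern with a hypothetical key layout (not the real Jukebox one):

import re

re_conv = re.compile(r"encoders\.(\d+)\.model\.(\d+)\.(bias|weight)")

def rename_key(key: str) -> str:
    # fullmatch -> extract the captured indices -> rebuild the new name.
    match = re_conv.fullmatch(key)
    if match is None:
        return key  # unmatched keys pass through untouched, like the converter's else branch
    encoder, layer, kind = match.groups()
    return f"encoders.{encoder}.downsample_block.{layer}.{kind}"

assert rename_key("encoders.0.model.3.weight") == "encoders.0.downsample_block.3.weight"
assert rename_key("unrelated.key") == "unrelated.key"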
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __A : Dict = threading.Lock() __A : Optional[logging.Handler] = None __A : Any = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } __A : int = logging.WARNING __A : int = True def __a ( ): SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_VERBOSITY" , A__ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " F"has to be one of: { ', '.join(log_levels.keys() ) }" ) return _default_log_level def __a ( ): return __name__.split("." )[0] def __a ( ): return logging.getLogger(_get_library_name() ) def __a ( ): global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return SCREAMING_SNAKE_CASE = logging.StreamHandler() # Set sys.stderr as stream. SCREAMING_SNAKE_CASE = sys.stderr.flush # Apply our default configuration to the library root logger. SCREAMING_SNAKE_CASE = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) SCREAMING_SNAKE_CASE = False def __a ( ): global _default_handler with _lock: if not _default_handler: return SCREAMING_SNAKE_CASE = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) SCREAMING_SNAKE_CASE = None def __a ( ): return log_levels def __a ( A__ : Optional[str] = None ): if name is None: SCREAMING_SNAKE_CASE = _get_library_name() _configure_library_root_logger() return logging.getLogger(A__ ) def __a ( ): _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def __a ( A__ : int ): _configure_library_root_logger() _get_library_root_logger().setLevel(A__ ) def __a ( ): return set_verbosity(A__ ) def __a ( ): return set_verbosity(A__ ) def __a ( ): return set_verbosity(A__ ) def __a ( ): return set_verbosity(A__ ) def __a ( ): _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def __a ( ): _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def __a ( A__ : logging.Handler ): _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(A__ ) def __a ( A__ : logging.Handler ): _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(A__ ) def __a ( ): _configure_library_root_logger() SCREAMING_SNAKE_CASE = False def __a ( ): _configure_library_root_logger() SCREAMING_SNAKE_CASE = True def __a ( ): SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers for handler in handlers: SCREAMING_SNAKE_CASE = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) handler.setFormatter(A__ ) def __a ( ): SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(A__ ) def __a ( self : List[Any] , *A__ : Union[str, Any] , **A__ : List[str] ): 
SCREAMING_SNAKE_CASE = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , A__ ) if no_advisory_warnings: return self.warning(*A__ , **A__ ) __A : int = warning_advice @functools.lru_cache(A__ ) def __a ( self : Dict , *A__ : Optional[int] , **A__ : Dict ): self.warning(*A__ , **A__ ) __A : Tuple = warning_once class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Optional[Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Union[str, Any] ): # pylint: disable=unused-argument SCREAMING_SNAKE_CASE = args[0] if args else None def __iter__( self : List[Any] ): return iter(self._iterator ) def __getattr__( self : List[Any] , __lowerCamelCase : Tuple ): def empty_fn(*__lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Any ): return self def __exit__( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): return class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __call__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : List[Any] ): if _tqdm_active: return tqdm_lib.tqdm(*__lowerCamelCase , **__lowerCamelCase ) else: return EmptyTqdm(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : List[Any] ): if _tqdm_active: return tqdm_lib.tqdm.get_lock() __A : List[str] = _tqdm_cls() def __a ( ): global _tqdm_active return bool(_tqdm_active ) def __a ( ): global _tqdm_active SCREAMING_SNAKE_CASE = True hf_hub_utils.enable_progress_bars() def __a ( ): global _tqdm_active SCREAMING_SNAKE_CASE = False hf_hub_utils.disable_progress_bars()
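# A short usage sketch for the logging helpers above, assuming they live at
# `transformers.utils.logging` (their usual location; adjust the import if the
# module is vendored elsewhere):
from transformers.utils import logging

logging.set_verbosity_info()            # raise library verbosity to INFO
logger = logging.get_logger(__name__)   # child of the library root logger
logger.info("This message now reaches the stderr handler.")

# The same threshold can be set without code via the environment variable
# read at handler-configuration time:
#   TRANSFORMERS_VERBOSITY=debug python my_script.py
logging.disable_progress_bars()  # also silences the tqdm wrapper defined above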
698
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
698
1
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the undirected edge as used in both directions
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# classify the graph: 1 = Euler cycle, 2 = Euler path, 3 = neither
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
698
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' lowerCamelCase__ = "resnet" lowerCamelCase__ = ["basic", "bottleneck"] def __init__( self : Optional[Any] , __lowerCamelCase : int=3 , __lowerCamelCase : Dict=64 , __lowerCamelCase : str=[256, 512, 1024, 2048] , __lowerCamelCase : str=[3, 4, 6, 3] , __lowerCamelCase : Optional[int]="bottleneck" , __lowerCamelCase : int="relu" , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , **__lowerCamelCase : Dict , ): super().__init__(**__lowerCamelCase ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embedding_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = layer_type SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = downsample_in_first_stage SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = version.parse("1.11" ) @property def _snake_case ( self : Optional[int] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : Optional[int] ): return 1e-3
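# A usage sketch for the configuration above, assuming the standard
# transformers export name `ResNetConfig` for the "resnet" model type:
from transformers import ResNetConfig

config = ResNetConfig(
    depths=[3, 4, 6, 3],                 # stage depths of a ResNet-50-style net
    layer_type="bottleneck",
    out_features=["stage2", "stage4"],   # aligned against the generated stage names
)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']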
698
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : Any = { 'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'], 'tokenization_roberta': ['RobertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = ['RobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = [ 'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'RobertaForCausalLM', 'RobertaForMaskedLM', 'RobertaForMultipleChoice', 'RobertaForQuestionAnswering', 'RobertaForSequenceClassification', 'RobertaForTokenClassification', 'RobertaModel', 'RobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[Any] = [ 'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRobertaForCausalLM', 'TFRobertaForMaskedLM', 'TFRobertaForMultipleChoice', 'TFRobertaForQuestionAnswering', 'TFRobertaForSequenceClassification', 'TFRobertaForTokenClassification', 'TFRobertaMainLayer', 'TFRobertaModel', 'TFRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FlaxRobertaForCausalLM', 'FlaxRobertaForMaskedLM', 'FlaxRobertaForMultipleChoice', 'FlaxRobertaForQuestionAnswering', 'FlaxRobertaForSequenceClassification', 'FlaxRobertaForTokenClassification', 'FlaxRobertaModel', 'FlaxRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __A : str = logging.get_logger(__name__) __A : Optional[Any] = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "imagegpt" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , __lowerCamelCase : Any=512 + 1 , __lowerCamelCase : str=32 * 32 , __lowerCamelCase : Any=512 , __lowerCamelCase : Optional[int]=24 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]="quick_gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=1e-5 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : int=False , **__lowerCamelCase : Union[str, Any] , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = n_positions SCREAMING_SNAKE_CASE = n_embd SCREAMING_SNAKE_CASE = n_layer SCREAMING_SNAKE_CASE = n_head SCREAMING_SNAKE_CASE = n_inner SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_pdrop SCREAMING_SNAKE_CASE = embd_pdrop SCREAMING_SNAKE_CASE = attn_pdrop SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scale_attn_weights SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE = reorder_and_upcast_attn SCREAMING_SNAKE_CASE = tie_word_embeddings super().__init__(tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def _snake_case ( self : Optional[int] , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , ): SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return inputs
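# The defaults above encode ImageGPT's setup: pixels are quantized into 512
# color clusters plus one start-of-sequence token (vocab_size = 512 + 1), and
# a 32x32 image unrolls into n_positions = 32 * 32 = 1024 tokens. A usage
# sketch, assuming the standard transformers export name `ImageGPTConfig`:
from transformers import ImageGPTConfig

config = ImageGPTConfig()  # 513-token vocabulary, 1024 positions
assert config.vocab_size == 512 + 1
assert config.n_positions == 32 * 32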
698
1
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline __A : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : int , **__lowerCamelCase : Optional[int] ): super().__init__(**__lowerCamelCase ) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self : Tuple , __lowerCamelCase : Union[np.ndarray, bytes, str] , **__lowerCamelCase : Union[str, Any] ): return super().__call__(__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Dict , **__lowerCamelCase : str ): SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: SCREAMING_SNAKE_CASE = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: SCREAMING_SNAKE_CASE = kwargs["hypothesis_template"] return preprocess_params, {}, {} def _snake_case ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]="This is a sound of {}." ): if isinstance(__lowerCamelCase , __lowerCamelCase ): if audio.startswith("http://" ) or audio.startswith("https://" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png SCREAMING_SNAKE_CASE = requests.get(__lowerCamelCase ).content else: with open(__lowerCamelCase , "rb" ) as f: SCREAMING_SNAKE_CASE = f.read() if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = ffmpeg_read(__lowerCamelCase , self.feature_extractor.sampling_rate ) if not isinstance(__lowerCamelCase , np.ndarray ): raise ValueError("We expect a numpy ndarray as input" ) if len(audio.shape ) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" ) SCREAMING_SNAKE_CASE = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" ) SCREAMING_SNAKE_CASE = candidate_labels SCREAMING_SNAKE_CASE = [hypothesis_template.format(__lowerCamelCase ) for x in candidate_labels] SCREAMING_SNAKE_CASE = self.tokenizer(__lowerCamelCase , return_tensors=self.framework , padding=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _snake_case ( self : int , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = model_inputs.pop("candidate_labels" ) SCREAMING_SNAKE_CASE = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , __lowerCamelCase ): SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. SCREAMING_SNAKE_CASE = text_inputs[0][0] SCREAMING_SNAKE_CASE = self.model(**__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def _snake_case ( self : Any , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = model_outputs.pop("candidate_labels" ) SCREAMING_SNAKE_CASE = model_outputs["logits"][0] if self.framework == "pt": SCREAMING_SNAKE_CASE = logits.softmax(dim=0 ) SCREAMING_SNAKE_CASE = probs.tolist() else: raise ValueError("`tf` framework not supported." ) SCREAMING_SNAKE_CASE = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(__lowerCamelCase , __lowerCamelCase ) , key=lambda __lowerCamelCase : -x[0] ) ] return result
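# A usage sketch for the pipeline class above, assuming the public task name
# "zero-shot-audio-classification" and a CLAP checkpoint such as
# "laion/clap-htsat-unfused" (any model with paired audio/text encoders works):
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",
)
result = classifier(
    "dog_bark.wav",  # hypothetical local path; URLs and numpy waveforms also work
    candidate_labels=["dog barking", "vacuum cleaner", "rain"],
)
print(result[0]["label"], result[0]["score"])  # results come sorted by score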
698
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @slow @require_torch def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" ) SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size SCREAMING_SNAKE_CASE = tokenizer.sep_token_id SCREAMING_SNAKE_CASE = tokenizer.cls_token_id SCREAMING_SNAKE_CASE = 128 SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) ) SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) ) SCREAMING_SNAKE_CASE = 4 def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ): # Tokenizer will automatically set [BOS] <text> [EOS] SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 ) SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 ) SCREAMING_SNAKE_CASE = inputs.input_ids SCREAMING_SNAKE_CASE = inputs.attention_mask SCREAMING_SNAKE_CASE = outputs.input_ids SCREAMING_SNAKE_CASE = outputs.input_ids.copy() SCREAMING_SNAKE_CASE = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] SCREAMING_SNAKE_CASE = outputs.attention_mask assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids ) assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = pred.label_ids SCREAMING_SNAKE_CASE = pred.predictions # all unnecessary tokens are removed SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase ) return {"accuracy": accuracy} # map train dataset SCREAMING_SNAKE_CASE = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset SCREAMING_SNAKE_CASE = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments( output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , 
warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer SCREAMING_SNAKE_CASE = SeqaSeqTrainer( model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , ) # start training trainer.train()
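# The `-100` substitution in `_map_to_encoder_decoder_inputs` above matters
# because PyTorch's cross-entropy loss ignores targets equal to -100 by
# default, so padded label positions contribute nothing to the loss. A minimal
# demonstration:
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)                # 4 positions, 10-token vocabulary
labels = torch.tensor([3, 7, -100, -100])  # last two positions are padding
loss = F.cross_entropy(logits, labels)     # ignore_index defaults to -100
# Identical to averaging the loss over only the two real positions:
manual = F.cross_entropy(logits[:2], labels[:2])
assert torch.isclose(loss, manual)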
698
1
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place with bidirectional bubble passes.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: sink the smallest remaining element to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: bubble the largest remaining element to the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
698
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , "num_attention_heads" ) ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = kernel_size SCREAMING_SNAKE_CASE = stride SCREAMING_SNAKE_CASE = padding SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = initializer_range def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : List[str] ): return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , 
drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowerCamelCase__ = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = LevitModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : Any ): return @unittest.skip(reason="Levit does not use inputs_embeds" ) def _snake_case ( self : Union[str, Any] ): pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def _snake_case ( self : Tuple ): pass @unittest.skip(reason="Levit does not output attentions" ) def _snake_case ( self : Tuple ): pass def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) SCREAMING_SNAKE_CASE = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _snake_case ( self : int ): pass def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ): SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def _snake_case ( self : List[Any] ): if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ): SCREAMING_SNAKE_CASE = problem_type["title"] SCREAMING_SNAKE_CASE = problem_type["num_labels"] SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] 
) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def _snake_case ( self : List[Any] ): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Dict ): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
698
1
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = ["image_processor", "tokenizer"] lowerCamelCase__ = "ViTImageProcessor" lowerCamelCase__ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : str , __lowerCamelCase : int=None , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) SCREAMING_SNAKE_CASE = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__lowerCamelCase , __lowerCamelCase ) def __call__( self : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : int ): if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: SCREAMING_SNAKE_CASE = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if visual_prompt is not None: SCREAMING_SNAKE_CASE = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if images is not None: SCREAMING_SNAKE_CASE = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if visual_prompt is not None and images is not None: SCREAMING_SNAKE_CASE = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: SCREAMING_SNAKE_CASE = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: SCREAMING_SNAKE_CASE = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase ) def _snake_case ( self : List[Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[Any] ): return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Dict , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ): return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property def _snake_case ( self : str ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _snake_case ( self : Dict ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
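# The attribute pairing above (a ViTImageProcessor with a CLIP tokenizer and
# an optional `visual_prompt`) matches transformers' CLIPSegProcessor. A usage
# sketch assuming that class and the "CIDAS/clipseg-rd64-refined" checkpoint;
# the image file names are hypothetical:
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.open("scene.jpg")  # hypothetical local file

# Text-conditioned: returns input_ids/attention_mask plus pixel_values.
inputs = processor(text=["a cat"], images=image, return_tensors="pt")

# Visual-prompt-conditioned: adds conditional_pixel_values for the prompt crop.
prompt = Image.open("cat_crop.jpg")  # hypothetical local file
inputs = processor(visual_prompt=prompt, images=image, return_tensors="pt")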
698
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): return None class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ): return None class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : Optional[int] ): from transformers import BertModel SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(__lowerCamelCase ) ) vocab_file.flush() SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) ) model.save_pretrained(__lowerCamelCase ) self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase ) @require_tf @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ): try: # Compute path with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) return path except Exception as e: 
self.fail(__lowerCamelCase ) @require_torch @require_tokenizers @slow def _snake_case ( self : Dict ): from transformers import BertModel SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" ) @require_tf @require_tokenizers @slow def _snake_case ( self : int ): from transformers import TFBertModel SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" ) def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase ) # Assert all variables are present self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase ) self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"] SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__lowerCamelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
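# A usage sketch for the (legacy) graph-export helpers exercised above,
# assuming the deprecated `transformers.convert_graph_to_onnx` module is still
# importable; newer code should prefer `transformers.onnx` or `optimum`:
from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

onnx_path = Path("onnx/bert-base-cased.onnx")  # parent folder must be empty or absent
convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)
quantized_path = quantize(onnx_path)  # writes a "-quantized" sibling file
print(quantized_path)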
698
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : int = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "gpt_neo" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_layers SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = window_size SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_dropout SCREAMING_SNAKE_CASE = embed_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = attention_types SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) @staticmethod def _snake_case ( __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ): import torch SCREAMING_SNAKE_CASE = input.size() SCREAMING_SNAKE_CASE = len(A__ ) SCREAMING_SNAKE_CASE = shape[dimension] SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ ) SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1 SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE = [slice(A__ )] * rank SCREAMING_SNAKE_CASE = indices SCREAMING_SNAKE_CASE = input[s] SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A__ ) def __a ( A__ : Union[str, Any] , A__ : Optional[int] ): import torch SCREAMING_SNAKE_CASE = torch.arange(1 , A__ ) SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ ) SCREAMING_SNAKE_CASE = remainders == 0 SCREAMING_SNAKE_CASE = candidates[divisor_indices] SCREAMING_SNAKE_CASE = torch.max(A__ ) return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"} else: SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return common_inputs @property def _snake_case ( self : Optional[int] ): return self._config.num_heads def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE = seqlen + 2 SCREAMING_SNAKE_CASE = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE = common_inputs["attention_mask"] if self.use_past: SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self : Optional[int] ): return 13
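# `expand_attention_types_params` above simply repeats each pattern: the
# default `[[["global", "local"], 12]]` expands into 24 alternating layer
# types, which is what the length check against `num_layers` enforces. A
# pure-Python demonstration of that expansion:
attention_types = [[["global", "local"], 12]]
attention_layers = []
for pattern, repeats in attention_types:
    for _ in range(repeats):
        attention_layers.extend(pattern)

assert len(attention_layers) == 24
assert attention_layers[:4] == ["global", "local", "global", "local"]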
698
from manim import * class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): rect.set_stroke(__lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 ) self.add(__lowerCamelCase ) cpu_targs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) SCREAMING_SNAKE_CASE = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) SCREAMING_SNAKE_CASE = MarkupText( f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) ) self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 ) target.move_to(__lowerCamelCase ) first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) ) self.play(*__lowerCamelCase ) self.play(*__lowerCamelCase ) self.wait()
698
1
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __A : Optional[Any] = logging.get_logger(__name__) __A : str = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "t5" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Tuple , __lowerCamelCase : List[str]=32128 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Dict=64 , __lowerCamelCase : Union[str, Any]=2048 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=8 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Union[str, Any]=128 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : List[Any]=1e-6 , __lowerCamelCase : Union[str, Any]=1.0 , __lowerCamelCase : int="relu" , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Union[str, Any]=1 , **__lowerCamelCase : int , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = d_kv SCREAMING_SNAKE_CASE = d_ff SCREAMING_SNAKE_CASE = num_layers SCREAMING_SNAKE_CASE = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = relative_attention_num_buckets SCREAMING_SNAKE_CASE = relative_attention_max_distance SCREAMING_SNAKE_CASE = dropout_rate SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_factor SCREAMING_SNAKE_CASE = feed_forward_proj SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = self.feed_forward_proj.split("-" ) SCREAMING_SNAKE_CASE = act_info[-1] SCREAMING_SNAKE_CASE = act_info[0] == "gated" if len(__lowerCamelCase ) > 1 and act_info[0] != "gated" or len(__lowerCamelCase ) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": SCREAMING_SNAKE_CASE = "gelu_new" super().__init__( pad_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase , ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: SCREAMING_SNAKE_CASE = "past_encoder_sequence + sequence" SCREAMING_SNAKE_CASE = {0: "batch"} SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_decoder_sequence + sequence"} else: SCREAMING_SNAKE_CASE = {0: "batch", 1: "decoder_sequence"} SCREAMING_SNAKE_CASE = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) return common_inputs @property def _snake_case ( self : str ): return 13
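# The `feed_forward_proj` parsing above splits on "-": "gated-gelu" selects a
# gated feed-forward block and (for backwards compatibility) the "gelu_new"
# activation, while plain "relu" keeps a dense block. A usage sketch assuming
# the standard transformers export `T5Config` and its `dense_act_fn` /
# `is_gated_act` attribute names:
from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
assert config.is_gated_act is True
assert config.dense_act_fn == "gelu_new"

config = T5Config(feed_forward_proj="relu")
assert config.is_gated_act is False
assert config.dense_act_fn == "relu"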
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ): pass @is_pipeline_test @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @require_torch def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCamelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @require_tf def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, 
"label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @slow @require_torch def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
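The _LazyModule indirection above defers the torch-backed imports until a symbol is first touched. A self-contained toy version of the pattern (not the transformers implementation) looks like this:

import importlib
import types


class LazyModule(types.ModuleType):
    """Toy sketch of the lazy-module pattern: resolve names on first access."""

    def __init__(self, name, symbol_to_module):
        super().__init__(name)
        self._symbol_to_module = symbol_to_module

    def __getattr__(self, attr):
        # Only reached when `attr` is not yet in the instance dict.
        module = importlib.import_module(self._symbol_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = LazyModule("demo", {"sqrt": "math"})
print(lazy.sqrt(2.0))  # 'math' is only imported on this first access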
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
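A few conversions through the table above; the factors make the expected outputs easy to verify by hand.

print(energy_conversion("joule", "joule", 1))         # 1.0
print(energy_conversion("joule", "kilojoule", 1))     # 0.001
print(energy_conversion("kilowatthour", "joule", 1))  # 3600000.0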
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __A : Dict = pytest.mark.integration @pytest.mark.parametrize("path" , ["paws", "csv"] ) def __a ( A__ : str , A__ : str ): inspect_dataset(A__ , A__ ) SCREAMING_SNAKE_CASE = path + ".py" assert script_name in os.listdir(A__ ) assert "__pycache__" not in os.listdir(A__ ) @pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.parametrize("path" , ["accuracy"] ) def __a ( A__ : List[str] , A__ : Optional[Any] ): inspect_metric(A__ , A__ ) SCREAMING_SNAKE_CASE = path + ".py" assert script_name in os.listdir(A__ ) assert "__pycache__" not in os.listdir(A__ ) @pytest.mark.parametrize( "path, config_name, expected_splits" , [ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] , ) def __a ( A__ : str , A__ : Optional[Any] , A__ : Any ): SCREAMING_SNAKE_CASE = get_dataset_config_info(A__ , config_name=A__ ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" , [ ("paws", None, ValueError), ] , ) def __a ( A__ : Optional[Any] , A__ : List[str] , A__ : Optional[int] ): with pytest.raises(A__ ): get_dataset_config_info(A__ , config_name=A__ ) @pytest.mark.parametrize( "path, expected" , [ ("squad", "plain_text"), ("acronym_identification", "default"), ("lhoestq/squad", "plain_text"), ("lhoestq/test", "default"), ("lhoestq/demo1", "lhoestq--demo1"), ("dalle-mini/wit", "dalle-mini--wit"), ] , ) def __a ( A__ : int , A__ : Tuple ): SCREAMING_SNAKE_CASE = get_dataset_config_names(A__ ) assert expected in config_names @pytest.mark.parametrize( "path, expected_configs, expected_splits_in_first_config" , [ ("squad", ["plain_text"], ["train", "validation"]), ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]), ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]), ] , ) def __a ( A__ : Union[str, Any] , A__ : Tuple , A__ : Any ): SCREAMING_SNAKE_CASE = get_dataset_infos(A__ ) assert list(infos.keys() ) == expected_configs SCREAMING_SNAKE_CASE = expected_configs[0] assert expected_config in infos SCREAMING_SNAKE_CASE = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( "path, expected_config, expected_splits" , [ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] , ) def __a ( A__ : List[Any] , A__ : List[Any] , A__ : Dict ): SCREAMING_SNAKE_CASE = get_dataset_infos(A__ ) assert expected_config in infos SCREAMING_SNAKE_CASE = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" , [ ("paws", None, ValueError), ] , ) def __a ( A__ : str , A__ : Tuple , A__ : Optional[Any] ): with pytest.raises(A__ ): get_dataset_split_names(A__ , config_name=A__ )
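For reference, the helpers exercised by these tests are called the same way outside pytest; the example outputs are illustrative of the parametrized cases above.

from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("paws"))
# e.g. ['labeled_final', 'labeled_swap', 'unlabeled_final']
print(get_dataset_split_names("squad", config_name="plain_text"))
# e.g. ['train', 'validation']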
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Chain colliding entries: each slot holds a deque of values.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
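Hypothetical usage, assuming the sibling hash_table.HashTable exposes a TheAlgorithms-style constructor and an insert_data() method (neither is shown in this file):

table = HashTableWithLinkedList(3, charge_factor=2)  # assumed constructor signature
for value in (10, 13, 16):
    table.insert_data(value)  # assumed HashTable method
# Values hashing to the same slot end up chained in that slot's deque.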
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.17.0.dev0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') __A : Tuple = logging.getLogger(__name__) @dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = field( default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) lowerCamelCase__ = field( default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , ) lowerCamelCase__ = field( default=1_0_2_4 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "A csv or a json file containing the training data."} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "A csv or a json file containing the validation data."} ) lowerCamelCase__ = field(default=__snake_case , metadata={"help": "A csv or a json file containing the test data."} ) def _snake_case ( self : List[str] ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: SCREAMING_SNAKE_CASE = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." SCREAMING_SNAKE_CASE = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) lowerCamelCase__ = field( default=__snake_case , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) lowerCamelCase__ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowerCamelCase__ = field( default=__snake_case , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def __a ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(A__ ) datasets.utils.logging.set_verbosity(A__ ) transformers.utils.logging.set_verbosity(A__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. SCREAMING_SNAKE_CASE = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: SCREAMING_SNAKE_CASE = data_args.train_file.split("." )[-1] SCREAMING_SNAKE_CASE = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." SCREAMING_SNAKE_CASE = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files SCREAMING_SNAKE_CASE = load_dataset("csv" , data_files=A__ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files SCREAMING_SNAKE_CASE = load_dataset("json" , data_files=A__ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels SCREAMING_SNAKE_CASE = raw_datasets["train"].features["label"].names SCREAMING_SNAKE_CASE = len(A__ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=A__ , ) SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: SCREAMING_SNAKE_CASE = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch SCREAMING_SNAKE_CASE = False # Some models have set the order of the labels to use, so let's make sure we do use it. SCREAMING_SNAKE_CASE = {"Refused": 0, "Entailed": 1} SCREAMING_SNAKE_CASE = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(A__ : List[str] ): # Tokenize the texts def _convert_table_text_to_pandas(A__ : List[Any] ): SCREAMING_SNAKE_CASE = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd SCREAMING_SNAKE_CASE = examples["statement"] SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) SCREAMING_SNAKE_CASE = tokenizer(A__ , A__ , padding=A__ , max_length=A__ , truncation=A__ ) SCREAMING_SNAKE_CASE = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): SCREAMING_SNAKE_CASE = raw_datasets.map( A__ , batched=A__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) SCREAMING_SNAKE_CASE = raw_datasets["train"] if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) SCREAMING_SNAKE_CASE = raw_datasets["validation"] if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) SCREAMING_SNAKE_CASE = raw_datasets["test"] if data_args.max_predict_samples is not None: SCREAMING_SNAKE_CASE = 
predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(A__ ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(A__ : EvalPrediction ): SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , A__ ) else p.predictions SCREAMING_SNAKE_CASE = np.argmax(A__ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: SCREAMING_SNAKE_CASE = default_data_collator elif training_args.fpaa: SCREAMING_SNAKE_CASE = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 ) else: SCREAMING_SNAKE_CASE = None # Initialize our Trainer SCREAMING_SNAKE_CASE = Trainer( model=A__ , args=A__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=A__ , tokenizer=A__ , data_collator=A__ , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE = last_checkpoint SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=A__ ) SCREAMING_SNAKE_CASE = train_result.metrics SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ ) ) SCREAMING_SNAKE_CASE = min(A__ , len(A__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , A__ ) trainer.save_metrics("train" , A__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=A__ ) SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A__ ) SCREAMING_SNAKE_CASE = min(A__ , len(A__ ) ) trainer.log_metrics("eval" , A__ ) trainer.save_metrics("eval" , A__ ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("label" ) SCREAMING_SNAKE_CASE = trainer.predict(A__ , metric_key_prefix="predict" ).predictions SCREAMING_SNAKE_CASE = np.argmax(A__ , axis=1 ) SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(A__ , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(A__ ): SCREAMING_SNAKE_CASE = label_list[item] writer.write(F"{index}\t{item}\n" ) SCREAMING_SNAKE_CASE = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**A__ ) else: trainer.create_model_card(**A__ ) def __a ( A__ : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
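An illustrative launch of the script; the filename and output path are placeholders, and the flags come from the dataclasses and TrainingArguments parsed above.

import subprocess

subprocess.run([
    "python", "run_tabfact.py",  # placeholder script name
    "--model_name_or_path", "microsoft/tapex-base",
    "--dataset_name", "tab_fact",
    "--dataset_config_name", "tab_fact",
    "--do_train",
    "--do_eval",
    "--max_seq_length", "1024",
    "--output_dir", "./tapex-tabfact",  # placeholder output dir
], check=True)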
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : int = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "gpt_neo" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_layers SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = window_size SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_dropout SCREAMING_SNAKE_CASE = embed_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = attention_types SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) @staticmethod def _snake_case ( __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ): import torch SCREAMING_SNAKE_CASE = input.size() SCREAMING_SNAKE_CASE = len(A__ ) SCREAMING_SNAKE_CASE = shape[dimension] SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ ) SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1 SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE = [slice(A__ )] * rank SCREAMING_SNAKE_CASE = indices SCREAMING_SNAKE_CASE = input[s] SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A__ ) def __a ( A__ : Union[str, Any] , A__ : Optional[int] ): import torch SCREAMING_SNAKE_CASE = torch.arange(1 , A__ ) SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ ) SCREAMING_SNAKE_CASE = remainders == 0 SCREAMING_SNAKE_CASE = candidates[divisor_indices] SCREAMING_SNAKE_CASE = torch.max(A__ ) return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"} else: SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return common_inputs @property def _snake_case ( self : Optional[int] ): return self._config.num_heads def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE = seqlen + 2 SCREAMING_SNAKE_CASE = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE = common_inputs["attention_mask"] if self.use_past: SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self : Optional[int] ): return 13
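The attention_types expansion above, isolated into a standalone function to show the unrolling (this mirrors the static method rather than replacing it):

def expand_attention_types(attention_types):
    layers = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            layers.extend(pattern)
    return layers


# The default [[["global", "local"], 12]] yields 24 alternating entries,
# satisfying the len(attention_layers) == num_layers check in the config.
print(expand_attention_types([[["global", "local"], 12]]))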
import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __A : List[Any] = logging.get_logger(__name__) __A : List[Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __A : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __a ( A__ : List[Any] , A__ : Dict , A__ : Tuple , A__ : Tuple , A__ : Dict ): for attribute in key.split("." ): SCREAMING_SNAKE_CASE = getattr(A__ , A__ ) if weight_type is not None: SCREAMING_SNAKE_CASE = getattr(A__ , A__ ).shape else: SCREAMING_SNAKE_CASE = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": SCREAMING_SNAKE_CASE = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE = value elif weight_type == "bias": SCREAMING_SNAKE_CASE = value else: SCREAMING_SNAKE_CASE = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __a ( A__ : List[str] , A__ : Tuple ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = fairseq_model.state_dict() SCREAMING_SNAKE_CASE = hf_model.feature_extractor SCREAMING_SNAKE_CASE = hf_model.adapter for name, value in fairseq_dict.items(): SCREAMING_SNAKE_CASE = False if "conv_layers" in name: load_conv_layer( A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == "group" , ) SCREAMING_SNAKE_CASE = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ): load_adapter(A__ , A__ , A__ , A__ ) SCREAMING_SNAKE_CASE = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: SCREAMING_SNAKE_CASE = True if "*" in mapped_key: SCREAMING_SNAKE_CASE = name.split(A__ )[0].split("." 
)[-2] SCREAMING_SNAKE_CASE = mapped_key.replace("*" , A__ ) if "weight_g" in name: SCREAMING_SNAKE_CASE = "weight_g" elif "weight_v" in name: SCREAMING_SNAKE_CASE = "weight_v" elif "bias" in name: SCREAMING_SNAKE_CASE = "bias" elif "weight" in name: SCREAMING_SNAKE_CASE = "weight" else: SCREAMING_SNAKE_CASE = None set_recursively(A__ , A__ , A__ , A__ , A__ ) continue if not is_used: unused_weights.append(A__ ) logger.warning(F"Unused weights: {unused_weights}" ) def __a ( A__ : List[str] , A__ : Optional[int] , A__ : Tuple , A__ : List[Any] , A__ : Dict ): SCREAMING_SNAKE_CASE = full_name.split("conv_layers." )[-1] SCREAMING_SNAKE_CASE = name.split("." ) SCREAMING_SNAKE_CASE = int(items[0] ) SCREAMING_SNAKE_CASE = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) SCREAMING_SNAKE_CASE = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) SCREAMING_SNAKE_CASE = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) SCREAMING_SNAKE_CASE = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) SCREAMING_SNAKE_CASE = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(A__ ) def __a ( A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Any ): SCREAMING_SNAKE_CASE = full_name.split("adaptor." )[-1] SCREAMING_SNAKE_CASE = name.split("." ) if items[1].isdigit(): SCREAMING_SNAKE_CASE = int(items[1] ) else: SCREAMING_SNAKE_CASE = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." SCREAMING_SNAKE_CASE = value logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." SCREAMING_SNAKE_CASE = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." SCREAMING_SNAKE_CASE = value logger.info(F"Adapter proj layer bias was initialized from {full_name}." 
) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." SCREAMING_SNAKE_CASE = value logger.info(F"Adapter proj layer weight was initialized from {full_name}." ) elif isinstance(A__ , A__ ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." SCREAMING_SNAKE_CASE = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." SCREAMING_SNAKE_CASE = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." ) else: unused_weights.append(A__ ) def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ ) SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer @torch.no_grad() def __a ( A__ : Any , A__ : Optional[Any] , A__ : Dict , A__ : str , A__ : Dict , A__ : Optional[int] , A__ : Optional[Any] , A__ : Union[str, Any] , A__ : Any , A__ : int , A__ : Optional[int] , ): SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained( A__ , add_adapter=A__ , adapter_stride=A__ , adapter_kernel_size=A__ , use_auth_token=A__ , output_hidden_size=A__ , ) SCREAMING_SNAKE_CASE = MBartConfig.from_pretrained(A__ ) # load model SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, } , ) SCREAMING_SNAKE_CASE = model[0].eval() # load feature extractor SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(A__ , use_auth_token=A__ ) # set weights for wav2vec2 encoder SCREAMING_SNAKE_CASE = WavaVecaModel(A__ ) recursively_load_weights_wavaveca(model.encoder , A__ ) # load decoder weights SCREAMING_SNAKE_CASE = MBartForCausalLM(A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A__ ) logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) SCREAMING_SNAKE_CASE = SpeechEncoderDecoderModel(encoder=A__ , decoder=A__ ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = MBartaaTokenizer(A__ ) tokenizer.save_pretrained(A__ ) SCREAMING_SNAKE_CASE = hf_wavavec.config.to_dict() SCREAMING_SNAKE_CASE = tokenizer.pad_token_id SCREAMING_SNAKE_CASE = tokenizer.bos_token_id SCREAMING_SNAKE_CASE = tokenizer.eos_token_id SCREAMING_SNAKE_CASE = "mbart50" SCREAMING_SNAKE_CASE = "wav2vec2" SCREAMING_SNAKE_CASE = tokenizer.eos_token_id SCREAMING_SNAKE_CASE = 250004 SCREAMING_SNAKE_CASE = tokenizer.eos_token_id SCREAMING_SNAKE_CASE = SpeechEncoderDecoderConfig.from_dict(A__ ) hf_wavavec.save_pretrained(A__ ) feature_extractor.save_pretrained(A__ ) if __name__ == "__main__": __A : Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', 
default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-xls-r-1b', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/mbart-large-50-one-to-many-mmt', type=str, help='Path to hf decoder checkpoint config', ) parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers') parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers') parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers') parser.add_argument('--encoder_output_dim', default=1_0_2_4, type=int, help='encoder output dim') parser.add_argument('--start_token_id', default=2_5_0_0_0_4, type=int, help='`decoder_start_token_id` of model config') __A : Optional[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
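Calling the converter directly mirrors the __main__ block above; every local path here is a placeholder.

convert_wavaveca_checkpoint(
    "checkpoint_best.pt",      # fairseq checkpoint (placeholder)
    "./wav2vec2-mbart50-out",  # PyTorch dump folder (placeholder)
    "dict.txt",                # fine-tuned model dict (placeholder)
    "config.yaml",             # fine-tuned model yaml (placeholder)
    encoder_config_path="facebook/wav2vec2-xls-r-1b",
    decoder_config_path="facebook/mbart-large-50-one-to-many-mmt",
    add_adapter=True,
    adapter_kernel_size=3,
    adapter_stride=2,
    decoder_start_token_id=250004,
    encoder_output_dim=1024,
)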
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
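Once the lazy module resolves, LongT5 is used like any other seq2seq model; a hedged sketch using the public Google checkpoint.

from transformers import AutoTokenizer, LongT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/long-t5-tglobal-base")
model = LongT5ForConditionalGeneration.from_pretrained("google/long-t5-tglobal-base")

inputs = tokenizer("summarize: " + "a long document ...", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))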
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __a ( A__ : List[str] ): SCREAMING_SNAKE_CASE = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ ) SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def __a ( A__ : Tuple , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = {} for old_key in state_dict.keys(): SCREAMING_SNAKE_CASE = old_key if "moe_layer.experts." in key: if expert_idx is not None: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" ) else: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: SCREAMING_SNAKE_CASE = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: SCREAMING_SNAKE_CASE = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("final_layer_norm" , "ff_layer_norm" ) SCREAMING_SNAKE_CASE = state_dict[old_key] return new_dict def __a ( A__ : List[str] , A__ : List[Any] , A__ : str , A__ : Union[str, Any] , A__ : str = WEIGHTS_NAME ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 os.makedirs(A__ , exist_ok=A__ ) for expert in range(A__ ): SCREAMING_SNAKE_CASE = switch_checkpoint_path + F"-rank-{expert}.pt" if os.path.isfile(A__ ): SCREAMING_SNAKE_CASE = torch.load(A__ )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = os.path.join( A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) torch.save(A__ , A__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(A__ )[0]].dtype ) # Add the last block SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) SCREAMING_SNAKE_CASE = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(A__ ) == 1: SCREAMING_SNAKE_CASE = os.path.join(A__ , A__ ) torch.save(A__ , A__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(A__ , A__ ) # Otherwise, let's build the index SCREAMING_SNAKE_CASE = {} for idx, shard in enumerate(A__ ): SCREAMING_SNAKE_CASE = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(A__ ):05d}.bin" ) SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , 
F"-{idx+1:05d}-of-???.bin" ) ) os.rename(A__ , os.path.join(A__ , A__ ) ) for key in shard: SCREAMING_SNAKE_CASE = shard_file # Add the metadata SCREAMING_SNAKE_CASE = {"total_size": total_size} SCREAMING_SNAKE_CASE = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE = json.dumps(A__ , indent=2 , sort_keys=A__ ) + "\n" f.write(A__ ) return metadata, index if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) __A : Optional[int] = parser.parse_args() __A , __A : Union[str, Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) __A : Any = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) __A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
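Two worked examples: the magnitude of the apparent power is |V||I| and its phase is the sum of the two angles.

import cmath

print(apparent_power(100, 5, 0, 0))  # (500+0j)
s = apparent_power(100, 5, 90, -90)
print(abs(s), cmath.phase(s))        # ~500.0, ~0.0 (up to float rounding)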
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: yields primes indefinitely."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite; reschedule its factor at the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime; its square is the first composite it will flag.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
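The sieve is unbounded, so sanity-check it by slicing off the first few primes.

from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]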
698
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __a ( A__ : List[str] ): SCREAMING_SNAKE_CASE = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ ) SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def __a ( A__ : Tuple , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = {} for old_key in state_dict.keys(): SCREAMING_SNAKE_CASE = old_key if "moe_layer.experts." in key: if expert_idx is not None: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" ) else: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: SCREAMING_SNAKE_CASE = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: SCREAMING_SNAKE_CASE = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("final_layer_norm" , "ff_layer_norm" ) SCREAMING_SNAKE_CASE = state_dict[old_key] return new_dict def __a ( A__ : List[str] , A__ : List[Any] , A__ : str , A__ : Union[str, Any] , A__ : str = WEIGHTS_NAME ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 os.makedirs(A__ , exist_ok=A__ ) for expert in range(A__ ): SCREAMING_SNAKE_CASE = switch_checkpoint_path + F"-rank-{expert}.pt" if os.path.isfile(A__ ): SCREAMING_SNAKE_CASE = torch.load(A__ )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = os.path.join( A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) torch.save(A__ , A__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(A__ )[0]].dtype ) # Add the last block SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) SCREAMING_SNAKE_CASE = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(A__ ) == 1: SCREAMING_SNAKE_CASE = os.path.join(A__ , A__ ) torch.save(A__ , A__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(A__ , A__ ) # Otherwise, let's build the index SCREAMING_SNAKE_CASE = {} for idx, shard in enumerate(A__ ): SCREAMING_SNAKE_CASE = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(A__ ):05d}.bin" ) SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , 
F"-{idx+1:05d}-of-???.bin" ) ) os.rename(A__ , os.path.join(A__ , A__ ) ) for key in shard: SCREAMING_SNAKE_CASE = shard_file # Add the metadata SCREAMING_SNAKE_CASE = {"total_size": total_size} SCREAMING_SNAKE_CASE = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE = json.dumps(A__ , indent=2 , sort_keys=A__ ) + "\n" f.write(A__ ) return metadata, index if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) __A : Optional[int] = parser.parse_args() __A , __A : Union[str, Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) __A : Any = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) __A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
698
1
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __A : str = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = SpeechTaTokenizer lowerCamelCase__ = False lowerCamelCase__ = True def _snake_case ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE = SpeechTaTokenizer(__lowerCamelCase ) SCREAMING_SNAKE_CASE = AddedToken("<mask>" , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) SCREAMING_SNAKE_CASE = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self : str , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = "this is a test" SCREAMING_SNAKE_CASE = "this is a test" return input_text, output_text def _snake_case ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str=False , __lowerCamelCase : str=20 , __lowerCamelCase : str=5 ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_input_output_texts(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) return text, ids def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = "<pad>" SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" ) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(__lowerCamelCase ) , 81 ) def _snake_case ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE = tokenizer.vocab_size SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) self.assertNotEqual(__lowerCamelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) SCREAMING_SNAKE_CASE = ["aaaaa bbbbbb", "cccccccccdddddddd"] SCREAMING_SNAKE_CASE = tokenizer.add_tokens(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.vocab_size SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) self.assertNotEqual(__lowerCamelCase , 0 ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , len(__lowerCamelCase ) ) self.assertEqual(__lowerCamelCase , all_size + len(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__lowerCamelCase ) 
self.assertGreaterEqual(len(__lowerCamelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) SCREAMING_SNAKE_CASE = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} SCREAMING_SNAKE_CASE = tokenizer.add_special_tokens(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.vocab_size SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) self.assertNotEqual(__lowerCamelCase , 0 ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(__lowerCamelCase , len(__lowerCamelCase ) ) self.assertEqual(__lowerCamelCase , all_size_a + len(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__lowerCamelCase ) self.assertGreaterEqual(len(__lowerCamelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def _snake_case ( self : str ): pass def _snake_case ( self : Any ): pass def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(__lowerCamelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) SCREAMING_SNAKE_CASE = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) # fmt: off self.assertListEqual(__lowerCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def _snake_case ( self : int ): # Use custom sequence because this tokenizer does not handle numbers. SCREAMING_SNAKE_CASE = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off SCREAMING_SNAKE_CASE = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=__lowerCamelCase , )
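A short usage sketch of the character-level tokenizer these tests exercise. The class name follows the import spelling in the snippet above, and the sketch assumes network access to the `microsoft/speecht5_asr` checkpoint named in the integration test.

from transformers import SpeechTaTokenizer  # class name as imported in the test above

tokenizer = SpeechTaTokenizer.from_pretrained("microsoft/speecht5_asr")
tokens = tokenizer.tokenize("This is a test")  # character-level pieces; '▁' marks spaces
print(tokens)
print(tokenizer.convert_tokens_to_ids(tokens))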
698
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = BlipImageProcessor() SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Dict , **__lowerCamelCase : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def _snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" ) SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = 
BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
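A compact sketch of the joint text-plus-image call the tests above cover, built from the same tiny components; the image is random noise, for illustration only.

import numpy as np
from PIL import Image
from transformers import BertTokenizer, BlipImageProcessor, BlipProcessor

processor = BlipProcessor(
    tokenizer=BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
    image_processor=BlipImageProcessor(),
)
# Random RGB image standing in for real input.
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']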
698
1
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = None def __a ( A__ : int , A__ : Dict=0.9_9_9 , A__ : Optional[int]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(A__ : Tuple ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ : Any ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) SCREAMING_SNAKE_CASE = [] for i in range(A__ ): SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) ) return torch.tensor(A__ , dtype=torch.floataa ) class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' @register_to_config def __init__( self : Optional[int] , __lowerCamelCase : int = 1000 , __lowerCamelCase : str = "fixed_small_log" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[float] = 1.0 , __lowerCamelCase : str = "epsilon" , __lowerCamelCase : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) SCREAMING_SNAKE_CASE = betas_for_alpha_bar(__lowerCamelCase ) SCREAMING_SNAKE_CASE = 1.0 - self.betas SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0 ) SCREAMING_SNAKE_CASE = torch.tensor(1.0 ) # standard deviation of the initial noise distribution SCREAMING_SNAKE_CASE = 1.0 # setable values SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = torch.from_numpy(np.arange(0 , __lowerCamelCase )[::-1].copy() ) SCREAMING_SNAKE_CASE = variance_type def _snake_case ( self : Tuple , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Optional[int] = None ): return sample def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Union[str, torch.device] = None ): SCREAMING_SNAKE_CASE = num_inference_steps SCREAMING_SNAKE_CASE = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) SCREAMING_SNAKE_CASE = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=None ): if prev_timestep is None: SCREAMING_SNAKE_CASE = t - 1 SCREAMING_SNAKE_CASE = self.alphas_cumprod[t] SCREAMING_SNAKE_CASE = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one SCREAMING_SNAKE_CASE = 1 - alpha_prod_t SCREAMING_SNAKE_CASE = 1 - alpha_prod_t_prev if prev_timestep == t - 1: SCREAMING_SNAKE_CASE = self.betas[t] else: SCREAMING_SNAKE_CASE = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample SCREAMING_SNAKE_CASE = beta_prod_t_prev / 
beta_prod_t * beta if variance_type is None: SCREAMING_SNAKE_CASE = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": SCREAMING_SNAKE_CASE = torch.log(torch.clamp(__lowerCamelCase , min=1e-20 ) ) SCREAMING_SNAKE_CASE = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler SCREAMING_SNAKE_CASE = variance.log() SCREAMING_SNAKE_CASE = beta.log() SCREAMING_SNAKE_CASE = (predicted_variance + 1) / 2 SCREAMING_SNAKE_CASE = frac * max_log + (1 - frac) * min_log return variance def _snake_case ( self : Dict , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : int , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int=None , __lowerCamelCase : bool = True , ): SCREAMING_SNAKE_CASE = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.split(__lowerCamelCase , sample.shape[1] , dim=1 ) else: SCREAMING_SNAKE_CASE = None # 1. compute alphas, betas if prev_timestep is None: SCREAMING_SNAKE_CASE = t - 1 SCREAMING_SNAKE_CASE = self.alphas_cumprod[t] SCREAMING_SNAKE_CASE = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one SCREAMING_SNAKE_CASE = 1 - alpha_prod_t SCREAMING_SNAKE_CASE = 1 - alpha_prod_t_prev if prev_timestep == t - 1: SCREAMING_SNAKE_CASE = self.betas[t] SCREAMING_SNAKE_CASE = self.alphas[t] else: SCREAMING_SNAKE_CASE = 1 - alpha_prod_t / alpha_prod_t_prev SCREAMING_SNAKE_CASE = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": SCREAMING_SNAKE_CASE = model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: SCREAMING_SNAKE_CASE = torch.clamp( __lowerCamelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t SCREAMING_SNAKE_CASE = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise SCREAMING_SNAKE_CASE = 0 if t > 0: SCREAMING_SNAKE_CASE = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=__lowerCamelCase , device=model_output.device ) SCREAMING_SNAKE_CASE = self._get_variance( __lowerCamelCase , predicted_variance=__lowerCamelCase , prev_timestep=__lowerCamelCase , ) if self.variance_type == "fixed_small_log": SCREAMING_SNAKE_CASE = variance elif self.variance_type == "learned_range": SCREAMING_SNAKE_CASE = (0.5 * variance).exp() else: raise ValueError( f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`" " for the UnCLIPScheduler." 
) SCREAMING_SNAKE_CASE = variance * variance_noise SCREAMING_SNAKE_CASE = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase ) def _snake_case ( self : Optional[int] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples SCREAMING_SNAKE_CASE = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE = alphas_cumprod[timesteps] ** 0.5 SCREAMING_SNAKE_CASE = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): SCREAMING_SNAKE_CASE = sqrt_alpha_prod.unsqueeze(-1 ) SCREAMING_SNAKE_CASE = (1 - alphas_cumprod[timesteps]) ** 0.5 SCREAMING_SNAKE_CASE = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): SCREAMING_SNAKE_CASE = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) SCREAMING_SNAKE_CASE = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
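The noising method at the end implements the usual closed-form forward process, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A minimal standalone sketch of that computation:

import torch


def add_noise(x0: torch.Tensor, noise: torch.Tensor, alphas_cumprod: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    sqrt_ab = alphas_cumprod[t] ** 0.5
    sqrt_om = (1 - alphas_cumprod[t]) ** 0.5
    while sqrt_ab.dim() < x0.dim():  # broadcast per-sample scalars over trailing dims
        sqrt_ab = sqrt_ab.unsqueeze(-1)
        sqrt_om = sqrt_om.unsqueeze(-1)
    return sqrt_ab * x0 + sqrt_om * noise


# Toy check with a made-up schedule.
abar = torch.cumprod(torch.full((1000,), 0.999), dim=0)
x0 = torch.randn(2, 3, 8, 8)
print(add_noise(x0, torch.randn_like(x0), abar, torch.tensor([10, 500])).shape)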
698
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ): pass def __a ( A__ : str ): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A : Tuple = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) SCREAMING_SNAKE_CASE = "What is the placebo?" SCREAMING_SNAKE_CASE = [ { "image": load_image(__lowerCamelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 ) self.assertEqual( __lowerCamelCase , [ [ {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "How many cats are there?" SCREAMING_SNAKE_CASE = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) # We can optionnally pass directly the words and bounding boxes SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _snake_case ( self : List[Any] ): pass
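A minimal invocation sketch of the document-question-answering pipeline these tests target, using a model and image URL taken from the snippet above (requires Tesseract via pytesseract):

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
url = "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
print(dqa(image=url, question="What is the invoice number?", top_k=2))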
698
1
from __future__ import annotations


def __a ( A__ : int = 4 ):
    SCREAMING_SNAKE_CASE = abs(A__ ) or 4
    return [[1 + x + y * row_size for x in range(A__ )] for y in range(A__ )]


def __a ( A__ : list[list[int]] ):
    return reverse_row(transpose(A__ ) )  # OR.. transpose(reverse_column(matrix))


def __a ( A__ : list[list[int]] ):
    return reverse_row(reverse_column(A__ ) )  # OR.. reverse_column(reverse_row(matrix))


def __a ( A__ : list[list[int]] ):
    return reverse_column(transpose(A__ ) )  # OR.. transpose(reverse_row(matrix))


def __a ( A__ : list[list[int]] ):
    SCREAMING_SNAKE_CASE = [list(A__ ) for x in zip(*A__ )]
    return matrix


def __a ( A__ : list[list[int]] ):
    SCREAMING_SNAKE_CASE = matrix[::-1]
    return matrix


def __a ( A__ : list[list[int]] ):
    SCREAMING_SNAKE_CASE = [x[::-1] for x in matrix]
    return matrix


def __a ( A__ : list[list[int]] ):
    for i in matrix:
        print(*A__ )


if __name__ == "__main__":
    __A : Tuple = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_aa(matrix))

    __A : List[str] = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_aaa(matrix))

    __A : Any = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_aaa(matrix))
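A quick sanity check of the transpose/reverse identities the rotation helpers above rely on; the names `transpose`, `reverse_row` and `reverse_column` mirror the call sites in the snippet.

def transpose(m):
    # zip(*) flips rows and columns
    return [list(row) for row in zip(*m)]


def reverse_row(m):
    return m[::-1]


def reverse_column(m):
    return [row[::-1] for row in m]


m = [[1, 2], [3, 4]]
assert reverse_row(transpose(m)) == [[2, 4], [1, 3]]       # 90 degrees counterclockwise
assert reverse_row(reverse_column(m)) == [[4, 3], [2, 1]]  # 180 degrees
assert reverse_column(transpose(m)) == [[3, 1], [4, 2]]    # 270 degrees counterclockwise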
698
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class _SCREAMING_SNAKE_CASE ( __snake_case ):
    '''simple docstring'''

    lowerCamelCase__ = "facebook/bart-large-mnli"
    lowerCamelCase__ = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    lowerCamelCase__ = "text_classifier"
    lowerCamelCase__ = AutoTokenizer
    lowerCamelCase__ = AutoModelForSequenceClassification
    lowerCamelCase__ = ["text", ["text"]]
    lowerCamelCase__ = ["text"]

    def _snake_case ( self : Optional[Any] ):
        super().setup()
        SCREAMING_SNAKE_CASE = self.model.config
        SCREAMING_SNAKE_CASE = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith("entail" ):
                SCREAMING_SNAKE_CASE = int(__lowerCamelCase )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )

    def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
        SCREAMING_SNAKE_CASE = labels
        return self.pre_processor(
            [text] * len(__lowerCamelCase ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )

    def _snake_case ( self : str , __lowerCamelCase : Optional[int] ):
        SCREAMING_SNAKE_CASE = outputs.logits
        SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
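The tool above is essentially zero-shot classification via NLI entailment; the same behavior is available through the standard pipeline with the same checkpoint (a sketch):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("I love this new phone", candidate_labels=["positive", "negative"])
print(result["labels"][0])  # most likely label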
698
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __A : Dict = get_tests_dir('fixtures') class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): # A mock response for an HTTP head request to emulate server down SCREAMING_SNAKE_CASE = mock.Mock() SCREAMING_SNAKE_CASE = 500 SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = HTTPError SCREAMING_SNAKE_CASE = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=__lowerCamelCase ) as mock_head: SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def _snake_case ( self : int ): # This test is for deprecated behavior and can be removed in v5 SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @classmethod def _snake_case ( cls : Optional[int] ): SCREAMING_SNAKE_CASE = TOKEN HfFolder.save_token(__lowerCamelCase ) @classmethod def _snake_case ( cls : List[str] ): try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( __lowerCamelCase , repo_id="test-feature-extractor" , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): 
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) def _snake_case ( self : Optional[Any] ): CustomFeatureExtractor.register_for_auto_class() SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(__lowerCamelCase ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=__lowerCamelCase ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
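Outside the test harness, the flow these tests exercise reduces to a plain `push_to_hub` call. A sketch, with the class name spelled as in the import above, a placeholder repo name, and a placeholder token:

from transformers import WavaVecaFeatureExtractor  # class name as imported in the test above

fe = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# Pushes the config to the Hub under your namespace; repo name and token are placeholders.
fe.push_to_hub("my-feature-extractor", use_auth_token="hf_...")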
698
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def __a ( A__ : str=None ):
    if subparsers is not None:
        SCREAMING_SNAKE_CASE = subparsers.add_parser("test" )
    else:
        SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" )
    parser.add_argument(
        "--config_file" , default=A__ , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=A__ )
    return parser


def __a ( A__ : Tuple ):
    SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
    if args.config_file is None:
        SCREAMING_SNAKE_CASE = script_name
    else:
        SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}"
    SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split()
    SCREAMING_SNAKE_CASE = execute_subprocess_async(A__ , env=os.environ.copy() )
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!" )


def __a ( ):
    SCREAMING_SNAKE_CASE = test_command_parser()
    SCREAMING_SNAKE_CASE = parser.parse_args()
    test_command(A__ )


if __name__ == "__main__":
    main()
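Invoked from the shell, the command this module wires up looks like the sketch below; the config path is a placeholder.

import subprocess

# Shell equivalent of this module's entry point:
#   accelerate test
#   accelerate test --config_file default_config.yaml
subprocess.run(["accelerate", "test", "--config_file", "default_config.yaml"], check=False)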
698
1
from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : List[Any] , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Dict=0 , __lowerCamelCase : int=2 , __lowerCamelCase : Optional[int]=512 , __lowerCamelCase : Optional[Any]="cls" , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Tuple=True , **__lowerCamelCase : List[Any] , ): super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = project_dim SCREAMING_SNAKE_CASE = pooler_fn SCREAMING_SNAKE_CASE = learn_encoder SCREAMING_SNAKE_CASE = use_attention_mask class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = [R"pooler", R"logit_scale"] lowerCamelCase__ = [R"position_ids", R"predictions.decoder.bias"] lowerCamelCase__ = "roberta" lowerCamelCase__ = RobertaSeriesConfig def __init__( self : Union[str, Any] , __lowerCamelCase : Tuple ): super().__init__(__lowerCamelCase ) SCREAMING_SNAKE_CASE = XLMRobertaModel(__lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim ) SCREAMING_SNAKE_CASE = getattr(__lowerCamelCase , "has_pre_transformation" , __lowerCamelCase ) if self.has_pre_transformation: SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim ) SCREAMING_SNAKE_CASE = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def _snake_case ( self : Any , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ): SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE = self.base_model( input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , position_ids=__lowerCamelCase , head_mask=__lowerCamelCase , inputs_embeds=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , output_attentions=__lowerCamelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__lowerCamelCase , ) if self.has_pre_transformation: SCREAMING_SNAKE_CASE = outputs["hidden_states"][-2] SCREAMING_SNAKE_CASE = self.pre_LN(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.transformation_pre(__lowerCamelCase ) return TransformationModelOutput( projection_state=__lowerCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: SCREAMING_SNAKE_CASE = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__lowerCamelCase , 
last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
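A shape-level sketch of what the projection head above does to the encoder output; the hidden and projection sizes are illustrative, not read from a real config.

import torch
from torch import nn

hidden_size, project_dim = 768, 512  # illustrative sizes
last_hidden_state = torch.randn(2, 10, hidden_size)   # (batch, seq_len, hidden_size)
transformation = nn.Linear(hidden_size, project_dim)
projection_state = transformation(last_hidden_state)  # (batch, seq_len, project_dim)
print(projection_state.shape)  # torch.Size([2, 10, 512])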
698
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Optional[int] = logging.get_logger(__name__) __A : List[str] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Tuple = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Dict = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : Dict ): return len(self.encoder ) def _snake_case ( self : int ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : str , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : List[str] , __lowerCamelCase : str ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ): return 
self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
698
1
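The record above hinges on a projection head: take the encoder's last hidden state and map hidden_size to project_dim with a single Linear layer. Below is a minimal, self-contained sketch of that pattern; the sizes (768 -> 512) and the class name are illustrative assumptions, not the classes from the record.

# Minimal sketch of the projection-head pattern: map the encoder's
# last hidden state from hidden_size to project_dim with one Linear.
# hidden_size and project_dim here are assumed example values.
import torch
from torch import nn


class ProjectionHead(nn.Module):
    def __init__(self, hidden_size: int = 768, project_dim: int = 512):
        super().__init__()
        self.transformation = nn.Linear(hidden_size, project_dim)

    def forward(self, last_hidden_state: torch.Tensor) -> torch.Tensor:
        # last_hidden_state: (batch, seq_len, hidden_size)
        return self.transformation(last_hidden_state)


head = ProjectionHead()
states = torch.randn(2, 16, 768)  # stand-in for an encoder output
print(head(states).shape)         # torch.Size([2, 16, 512])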
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be <= max_val)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value of lower must be less than higher")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
698
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
698
1
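A quick usage check for the quadratic-roots helper in the record above: real discriminants come back as floats, negative discriminants as complex values. The coefficients below are chosen arbitrarily, and the formula is repeated so the snippet is self-contained.

from cmath import sqrt


def quadratic_roots(a, b, c):
    # same formula as the record above, repeated for a self-contained demo
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


print(quadratic_roots(1, -3, 2))  # (2.0, 1.0): positive discriminant, real roots
print(quadratic_roots(1, 0, 1))   # (1j, -1j): negative discriminant, complex roots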
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
698
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCamelCase ): SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text
698
1
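The LSTM record above turns a 1-D series into supervised samples by sliding a look_back input window and a forward_days target window over it. A tiny sketch of that construction on a toy array follows; the window sizes are just example values.

import numpy as np


def make_windows(series, look_back=3, forward_days=2):
    # each sample: look_back past values as input, the next forward_days as target
    xs, ys = [], []
    for i in range(len(series) - forward_days - look_back + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)


x, y = make_windows(np.arange(10))
print(x[0], y[0])        # [0 1 2] [3 4]
print(x.shape, y.shape)  # (6, 3) (6, 2)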
import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A : Union[str, Any] = logging.get_logger(__name__) __A : List[str] = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "detr" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Optional[Any] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : int=100 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=2048 , __lowerCamelCase : str=8 , __lowerCamelCase : int=6 , __lowerCamelCase : List[str]=2048 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : Any=0.0 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]="relu" , __lowerCamelCase : List[str]=256 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : int=0.02 , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str="sine" , __lowerCamelCase : Union[str, Any]="resnet50" , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : Union[str, Any]=5 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Tuple=5 , __lowerCamelCase : Any=2 , __lowerCamelCase : Optional[int]=0.1 , **__lowerCamelCase : Optional[int] , ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = backbone_config.get("model_type" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(__lowerCamelCase ) # set timm attributes to None SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None, None, None SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def _snake_case ( self : Any ): return self.encoder_attention_heads @property def _snake_case ( self : Tuple ): return self.d_model @classmethod def _snake_case ( cls : Tuple , __lowerCamelCase : PretrainedConfig , **__lowerCamelCase : Any ): return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = version.parse("1.11" ) @property def _snake_case ( self : Any ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _snake_case ( self : List[str] ): return 1e-5 @property def _snake_case ( self : Optional[int] ): return 12
698
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
1
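The DETR config in the record above declares an attribute map so generic names like hidden_size resolve to model-specific fields like d_model. A toy version of how such a mapping can work is sketched below; this is my own illustrative class, not the transformers implementation.

class ToyConfig:
    # aliases copied from the DETR record; the class itself is a toy
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # only called when normal lookup fails; redirect mapped aliases
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)


cfg = ToyConfig()
print(cfg.hidden_size)          # 256, resolved via d_model
print(cfg.num_attention_heads)  # 8, resolved via encoder_attention_heads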
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
698
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
698
1
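The Project Euler record above counts prime pairs (p, q), p < q, with p**q * q**p <= N by comparing logarithms in a two-pointer sweep. A small brute-force cross-check of the same condition follows, under an assumed toy prime list and bound.

from math import log2


def count_pairs_bruteforce(primes, upper_bound):
    # pair (p, q) qualifies when q*log2(p) + p*log2(q) <= upper_bound,
    # i.e. when log2(p**q * q**p) <= upper_bound
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i + 1 :]:
            if q * log2(p) + p * log2(q) <= upper_bound:
                count += 1
    return count


primes = [2, 3, 5, 7, 11, 13]
print(count_pairs_bruteforce(primes, 50))  # 12: all pairs except (7,11), (7,13), (11,13)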
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version __A : Dict = get_logger(__name__) class _SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase__ = "dummy_data" lowerCamelCase__ = "datasets" lowerCamelCase__ = False def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Union[Version, str] , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[List[Callable]] = None , ): SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = dataset_name SCREAMING_SNAKE_CASE = cache_dir SCREAMING_SNAKE_CASE = use_local_dummy_data SCREAMING_SNAKE_CASE = config # download_callbacks take a single url as input SCREAMING_SNAKE_CASE = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root SCREAMING_SNAKE_CASE = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general SCREAMING_SNAKE_CASE = str(__lowerCamelCase ) # to be downloaded SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None @property def _snake_case ( self : Union[str, Any] ): if self._dummy_file is None: SCREAMING_SNAKE_CASE = self.download_dummy_data() return self._dummy_file @property def _snake_case ( self : Union[str, Any] ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def _snake_case ( self : int ): return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) SCREAMING_SNAKE_CASE = cached_path( __lowerCamelCase , cache_dir=self.cache_dir , extract_compressed_file=__lowerCamelCase , force_extract=__lowerCamelCase ) return os.path.join(__lowerCamelCase , self.dummy_file_name ) @property def _snake_case ( self : Union[str, Any] ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _snake_case ( self : int ): if self._bucket_url is None: SCREAMING_SNAKE_CASE = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def _snake_case ( self : Any ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def _snake_case ( self : List[str] , __lowerCamelCase : str , *__lowerCamelCase : Dict ): if self.load_existing_dummy_data: # dummy data is downloaded and tested SCREAMING_SNAKE_CASE = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned SCREAMING_SNAKE_CASE = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCamelCase , __lowerCamelCase ): return self.create_dummy_data_dict(__lowerCamelCase , __lowerCamelCase ) elif isinstance(__lowerCamelCase , (list, tuple) ): return self.create_dummy_data_list(__lowerCamelCase , __lowerCamelCase ) else: return self.create_dummy_data_single(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] , __lowerCamelCase : List[str] , *__lowerCamelCase : Optional[Any] ): return self.download_and_extract(__lowerCamelCase ) def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : Dict ): return self.download_and_extract(__lowerCamelCase ) def _snake_case ( self : int , __lowerCamelCase : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ): return path def _snake_case ( self : Dict ): return {} def _snake_case ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCamelCase , __lowerCamelCase ): for single_url in single_urls: download_callback(__lowerCamelCase ) else: SCREAMING_SNAKE_CASE = single_urls download_callback(__lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = [os.path.join(__lowerCamelCase , urllib.parse.quote_plus(Path(__lowerCamelCase ).name ) ) for x in single_urls] else: SCREAMING_SNAKE_CASE = single_urls SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , urllib.parse.quote_plus(Path(__lowerCamelCase ).name ) ) SCREAMING_SNAKE_CASE = value # make sure that values are unique if all(isinstance(__lowerCamelCase , __lowerCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique SCREAMING_SNAKE_CASE = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one SCREAMING_SNAKE_CASE = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __lowerCamelCase ) ) for url in data_url ) SCREAMING_SNAKE_CASE = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): SCREAMING_SNAKE_CASE = [data_url[0]] * len(__lowerCamelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCamelCase ) 
return dummy_data_list def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : str ): for download_callback in self.download_callbacks: download_callback(__lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCamelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _snake_case ( self : Tuple ): pass def _snake_case ( self : Dict ): pass def _snake_case ( self : str , __lowerCamelCase : Optional[int] ): def _iter_archive_members(__lowerCamelCase : List[str] ): # this preserves the order of the members inside the ZIP archive SCREAMING_SNAKE_CASE = Path(self.dummy_file ).parent SCREAMING_SNAKE_CASE = path.relative_to(__lowerCamelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: SCREAMING_SNAKE_CASE = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCamelCase ) SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ) SCREAMING_SNAKE_CASE = _iter_archive_members(__lowerCamelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCamelCase ).as_posix(), file_path.open("rb" ) def _snake_case ( self : List[str] , __lowerCamelCase : Any ): if not isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = [paths] for path in paths: if os.path.isfile(__lowerCamelCase ): if os.path.basename(__lowerCamelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCamelCase ): if os.path.basename(__lowerCamelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCamelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCamelCase , __lowerCamelCase )
698
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' lowerCamelCase__ = "resnet" lowerCamelCase__ = ["basic", "bottleneck"] def __init__( self : Optional[Any] , __lowerCamelCase : int=3 , __lowerCamelCase : Dict=64 , __lowerCamelCase : str=[256, 512, 1024, 2048] , __lowerCamelCase : str=[3, 4, 6, 3] , __lowerCamelCase : Optional[int]="bottleneck" , __lowerCamelCase : int="relu" , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , **__lowerCamelCase : Dict , ): super().__init__(**__lowerCamelCase ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embedding_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = layer_type SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = downsample_in_first_stage SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = version.parse("1.11" ) @property def _snake_case ( self : Optional[int] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : Optional[int] ): return 1e-3
698
1
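The ResNet config in the record above derives stage_names as ["stem", "stage1", ...] and aligns the requested out_features with out_indices. A toy version of that alignment is sketched here; the helper is my own, not the transformers backbone utility.

def align_output_features(stage_names, out_features=None, out_indices=None):
    # default to the last stage when nothing is requested,
    # otherwise derive whichever side is missing from the other
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]
    if out_indices is None:
        out_indices = [stage_names.index(f) for f in out_features]
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    return out_features, out_indices


stages = ["stem"] + [f"stage{i}" for i in range(1, 5)]
print(align_output_features(stages, out_features=["stage2", "stage4"]))
# (['stage2', 'stage4'], [2, 4])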
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = (DPMSolverSDEScheduler,) lowerCamelCase__ = 1_0 def _snake_case ( self : Union[str, Any] , **__lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = { "num_train_timesteps": 1100, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "noise_sampler_seed": 0, } config.update(**__lowerCamelCase ) return config def _snake_case ( self : List[Any] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def _snake_case ( self : str ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config() SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE = self.dummy_model() SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE = sample.to(__lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = output.prev_sample SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1e-2 assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1e-2 assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3 def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="v_prediction" ) SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE = self.dummy_model() SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE = sample.to(__lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = output.prev_sample SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert 
abs(result_sum.item() - 124.77_149_200_439_453 ) < 1e-2 assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1e-2 assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1e-2 assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1e-3 def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config() SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.dummy_model() SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: SCREAMING_SNAKE_CASE = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = output.prev_sample SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1e-2 assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1e-2 assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3 def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.scheduler_classes[0] SCREAMING_SNAKE_CASE = self.get_scheduler_config() SCREAMING_SNAKE_CASE = scheduler_class(**__lowerCamelCase , use_karras_sigmas=__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.dummy_model() SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE = sample.to(__lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = output.prev_sample SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1e-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1e-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1e-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
698
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __A : str = logging.get_logger(__name__) __A : Optional[Any] = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "imagegpt" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , __lowerCamelCase : Any=512 + 1 , __lowerCamelCase : str=32 * 32 , __lowerCamelCase : Any=512 , __lowerCamelCase : Optional[int]=24 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]="quick_gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=1e-5 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : int=False , **__lowerCamelCase : Union[str, Any] , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = n_positions SCREAMING_SNAKE_CASE = n_embd SCREAMING_SNAKE_CASE = n_layer SCREAMING_SNAKE_CASE = n_head SCREAMING_SNAKE_CASE = n_inner SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_pdrop SCREAMING_SNAKE_CASE = embd_pdrop SCREAMING_SNAKE_CASE = attn_pdrop SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scale_attn_weights SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE = reorder_and_upcast_attn SCREAMING_SNAKE_CASE = tie_word_embeddings super().__init__(tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def _snake_case ( self : Optional[int] , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , ): SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return inputs
698
1
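Every scheduler test in the record above runs the same diffusers sampling loop: scale the input, call the model, step the scheduler. A condensed sketch of that loop with a stand-in model follows; it assumes a recent diffusers install with the torchsde extra (as the test's @require_torchsde decorator suggests), and the tensor shape is arbitrary.

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler()  # default config; the tests above tweak betas etc.
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(model_input)  # stand-in for the denoising model
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])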
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ): '''simple docstring''' lowerCamelCase__ = "resnet" lowerCamelCase__ = ["basic", "bottleneck"] def __init__( self : Optional[Any] , __lowerCamelCase : int=3 , __lowerCamelCase : Dict=64 , __lowerCamelCase : str=[256, 512, 1024, 2048] , __lowerCamelCase : str=[3, 4, 6, 3] , __lowerCamelCase : Optional[int]="bottleneck" , __lowerCamelCase : int="relu" , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , **__lowerCamelCase : Dict , ): super().__init__(**__lowerCamelCase ) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embedding_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = layer_type SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = downsample_in_first_stage SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = version.parse("1.11" ) @property def _snake_case ( self : Optional[int] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _snake_case ( self : Optional[int] ): return 1e-3
698
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @slow @require_torch def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" ) SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size SCREAMING_SNAKE_CASE = tokenizer.sep_token_id SCREAMING_SNAKE_CASE = tokenizer.cls_token_id SCREAMING_SNAKE_CASE = 128 SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) ) SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) ) SCREAMING_SNAKE_CASE = 4 def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ): # Tokenizer will automatically set [BOS] <text> [EOS] SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 ) SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 ) SCREAMING_SNAKE_CASE = inputs.input_ids SCREAMING_SNAKE_CASE = inputs.attention_mask SCREAMING_SNAKE_CASE = outputs.input_ids SCREAMING_SNAKE_CASE = outputs.input_ids.copy() SCREAMING_SNAKE_CASE = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] SCREAMING_SNAKE_CASE = outputs.attention_mask assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids ) assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = pred.label_ids SCREAMING_SNAKE_CASE = pred.predictions # all unnecessary tokens are removed SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase ) return {"accuracy": accuracy} # map train dataset SCREAMING_SNAKE_CASE = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset SCREAMING_SNAKE_CASE = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments( output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , 
warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer SCREAMING_SNAKE_CASE = SeqaSeqTrainer( model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , ) # start training trainer.train()
698
1
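The trainer record above replaces pad-token ids in the labels with -100 so the cross-entropy loss ignores padding positions. A tiny standalone illustration of that masking follows; the pad id of 0 and the label values are assumptions for the example.

pad_token_id = 0  # assumed pad id for this example
batch_labels = [[5, 9, 2, 0, 0], [7, 3, 0, 0, 0]]
masked = [
    [-100 if token == pad_token_id else token for token in labels]
    for labels in batch_labels
]
print(masked)  # [[5, 9, 2, -100, -100], [7, 3, -100, -100, -100]]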
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
698
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , "num_attention_heads" ) ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = kernel_size SCREAMING_SNAKE_CASE = stride SCREAMING_SNAKE_CASE = padding SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = initializer_range def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : List[str] ): return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , 
drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowerCamelCase__ = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = LevitModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : Any ): return @unittest.skip(reason="Levit does not use inputs_embeds" ) def _snake_case ( self : Union[str, Any] ): pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def _snake_case ( self : Tuple ): pass @unittest.skip(reason="Levit does not output attentions" ) def _snake_case ( self : Tuple ): pass def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) SCREAMING_SNAKE_CASE = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _snake_case ( self : int ): pass def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ): SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def _snake_case ( self : List[Any] ): if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ): SCREAMING_SNAKE_CASE = problem_type["title"] SCREAMING_SNAKE_CASE = problem_type["num_labels"] SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] 
) # This tests that we do not trigger the warning from PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom that something is wrong with the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def _snake_case ( self : List[Any] ): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Dict ): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
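# Illustrative sketch (added; not part of the original test file): the shape checks
# above repeatedly apply the standard convolution output-size formula,
# floor((size + 2*padding - kernel_size) / stride) + 1, once per stride-2
# convolution in the patch embedding. A standalone check of that arithmetic,
# assuming a 64-pixel input with kernel_size=3, stride=2, padding=1 as in the
# upstream Levit tester:
from math import floor

def conv_output_size(size: int, kernel_size: int = 3, stride: int = 2, padding: int = 1) -> int:
    # one convolutional downsampling step
    return floor((size + 2 * padding - kernel_size) / stride) + 1

size = 64
for _ in range(4):  # the patch embedding stacks four stride-2 convolutions
    size = conv_output_size(size)
assert size == 4  # 64 -> 32 -> 16 -> 8 -> 4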
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Tuple , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = data def __iter__( self : str ): for element in self.data: yield element def __a ( A__ : List[str]=True ): SCREAMING_SNAKE_CASE = Accelerator(even_batches=A__ ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __a ( A__ : Accelerator , A__ : int , A__ : int , A__ : bool = False ): if iterable: SCREAMING_SNAKE_CASE = DummyIterableDataset(torch.as_tensor(range(A__ ) ) ) else: SCREAMING_SNAKE_CASE = TensorDataset(torch.as_tensor(range(A__ ) ) ) SCREAMING_SNAKE_CASE = DataLoader(A__ , batch_size=A__ ) SCREAMING_SNAKE_CASE = accelerator.prepare(A__ ) return dl def __a ( A__ : Accelerator , A__ : int , A__ : int , A__ : List[int] , A__ : List[int] , ): SCREAMING_SNAKE_CASE = create_dataloader(accelerator=A__ , dataset_size=A__ , batch_size=A__ ) SCREAMING_SNAKE_CASE = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __a ( ): SCREAMING_SNAKE_CASE = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( A__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( A__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def __a ( ): SCREAMING_SNAKE_CASE = create_accelerator(even_batches=A__ ) verify_dataloader_batch_sizes( A__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( A__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def __a ( ): SCREAMING_SNAKE_CASE = create_accelerator(even_batches=A__ ) SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE = accelerator.prepare(A__ ) SCREAMING_SNAKE_CASE = create_dataloader(A__ , dataset_size=3 , batch_size=1 ) SCREAMING_SNAKE_CASE = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(A__ ): SCREAMING_SNAKE_CASE = ddp_model(batch[0].float() ) SCREAMING_SNAKE_CASE = output.sum() loss.backward() batch_idxs.append(A__ ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif 
accelerator.process_index == 1: assert batch_idxs == [0] def __a ( A__ : List[Any] ): with warnings.catch_warnings(record=A__ ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , A__ ) assert "only supported for multi-GPU" in str(w[-1].message ) def __a ( ): SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = create_accelerator(even_batches=A__ ) SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE = accelerator.prepare(A__ ) SCREAMING_SNAKE_CASE = create_dataloader(A__ , dataset_size=3 , batch_size=1 ) SCREAMING_SNAKE_CASE = create_dataloader(A__ , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=A__ ): SCREAMING_SNAKE_CASE = train_dl.batch_sampler.even_batches SCREAMING_SNAKE_CASE = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __a ( ): SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = create_accelerator(even_batches=A__ ) SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE = accelerator.prepare(A__ ) create_dataloader(A__ , dataset_size=3 , batch_size=1 , iterable=A__ ) SCREAMING_SNAKE_CASE = create_dataloader(A__ , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=A__ ): SCREAMING_SNAKE_CASE = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __a ( ): SCREAMING_SNAKE_CASE = create_accelerator() SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) SCREAMING_SNAKE_CASE = accelerator.prepare(A__ ) create_dataloader(A__ , dataset_size=3 , batch_size=1 , iterable=A__ ) with warnings.catch_warnings(record=A__ ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=A__ ): pass assert issubclass(w[-1].category , A__ ) assert "only supported for map-style datasets" in str(w[-1].message ) def __a ( ): SCREAMING_SNAKE_CASE = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) SCREAMING_SNAKE_CASE = accelerator.state.distributed_type SCREAMING_SNAKE_CASE = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(A__ ) SCREAMING_SNAKE_CASE = original_state if __name__ == "__main__": main()
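# Illustrative sketch (added; not part of the original script): with
# even_batches=True the sampler pads by cycling samples so every process sees the
# same number of batches; with even_batches=False the tail is split unevenly. A
# rough, framework-free model of the resulting batch sizes, assuming simple
# contiguous sharding across processes:
def expected_batch_sizes(dataset_size: int, batch_size: int, num_processes: int, even_batches: bool):
    if even_batches:
        # pad up to a multiple of batch_size * num_processes (ceiling division)
        total = -(-dataset_size // (batch_size * num_processes)) * batch_size * num_processes
    else:
        total = dataset_size
    per_process = [
        total // num_processes + (1 if rank < total % num_processes else 0)
        for rank in range(num_processes)
    ]
    return [[min(batch_size, n - i) for i in range(0, n, batch_size)] for n in per_process]

# Matches the assertions in the tests above for 2 processes:
assert expected_batch_sizes(3, 1, 2, even_batches=True) == [[1, 1], [1, 1]]
assert expected_batch_sizes(3, 1, 2, even_batches=False) == [[1, 1], [1]]
assert expected_batch_sizes(7, 2, 2, even_batches=False) == [[2, 2], [2, 1]]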
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): return None class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ): return None class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : Optional[int] ): from transformers import BertModel SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(__lowerCamelCase ) ) vocab_file.flush() SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) ) model.save_pretrained(__lowerCamelCase ) self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase ) @require_tf @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ): try: # Compute path with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) return path except Exception as e: 
self.fail(__lowerCamelCase ) @require_torch @require_tokenizers @slow def _snake_case ( self : Dict ): from transformers import BertModel SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" ) @require_tf @require_tokenizers @slow def _snake_case ( self : int ): from transformers import TFBertModel SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" ) def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase ) # Assert all variables are present self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase ) self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"] SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__lowerCamelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
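# Illustrative sketch (added; not part of the original test file): the last test
# above checks that generate_identified_filename inserts a suffix before the file
# extension. The behaviour can be reproduced with pathlib alone (a re-implementation
# for illustration, not the library code):
from pathlib import Path

def add_suffix_before_extension(path: Path, identifier: str) -> Path:
    # ".../my_fake_model.onnx" + "-test" -> ".../my_fake_model-test.onnx"
    return path.parent.joinpath(path.stem + identifier).with_suffix(path.suffix)

assert (
    add_suffix_before_extension(Path("/home/something/my_fake_model.onnx"), "-test").as_posix()
    == "/home/something/my_fake_model-test.onnx"
)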
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __A : str = logging.get_logger(__name__) __A : Optional[Any] = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "imagegpt" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , __lowerCamelCase : Any=512 + 1 , __lowerCamelCase : str=32 * 32 , __lowerCamelCase : Any=512 , __lowerCamelCase : Optional[int]=24 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]="quick_gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=1e-5 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : int=False , **__lowerCamelCase : Union[str, Any] , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = n_positions SCREAMING_SNAKE_CASE = n_embd SCREAMING_SNAKE_CASE = n_layer SCREAMING_SNAKE_CASE = n_head SCREAMING_SNAKE_CASE = n_inner SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_pdrop SCREAMING_SNAKE_CASE = embd_pdrop SCREAMING_SNAKE_CASE = attn_pdrop SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scale_attn_weights SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE = reorder_and_upcast_attn SCREAMING_SNAKE_CASE = tie_word_embeddings super().__init__(tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def _snake_case ( self : Optional[int] , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , ): SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return inputs
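# Illustrative sketch (added; not part of the original file): the attribute_map
# above lets the config answer to both the canonical name ("hidden_size") and the
# GPT-style name ("n_embd"). A minimal standalone model of that mechanism,
# assuming __getattr__-style forwarding as in PretrainedConfig:
class TinyConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd: int = 512, n_layer: int = 24):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name: str):
        # only called when normal lookup fails; forward mapped aliases
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = TinyConfig()
assert cfg.hidden_size == cfg.n_embd == 512
assert cfg.num_hidden_layers == 24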
from manim import * class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): rect.set_stroke(__lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 ) self.add(__lowerCamelCase ) cpu_targs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) SCREAMING_SNAKE_CASE = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) SCREAMING_SNAKE_CASE = MarkupText( f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) ) self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 ) target.move_to(__lowerCamelCase ) first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) ) self.play(*__lowerCamelCase ) self.play(*__lowerCamelCase ) self.wait()
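# Illustrative usage note (added; not part of the original file): assuming this
# scene is saved as big_model_loading.py, it can be rendered with the standard
# manim community CLI, where -p previews the output and -ql selects low quality
# for fast iteration:
#
#   manim -pql big_model_loading.py _SCREAMING_SNAKE_CASE
#
# The file name is a placeholder; _SCREAMING_SNAKE_CASE is the scene class name
# used above.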
import math def __a ( A__ : list , A__ : int = 0 , A__ : int = 0 ): SCREAMING_SNAKE_CASE = end or len(A__ ) for i in range(A__ , A__ ): SCREAMING_SNAKE_CASE = i SCREAMING_SNAKE_CASE = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: SCREAMING_SNAKE_CASE = array[temp_index - 1] temp_index -= 1 SCREAMING_SNAKE_CASE = temp_index_value return array def __a ( A__ : list , A__ : int , A__ : int ): # Max Heap SCREAMING_SNAKE_CASE = index SCREAMING_SNAKE_CASE = 2 * index + 1 # Left Node SCREAMING_SNAKE_CASE = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: SCREAMING_SNAKE_CASE = left_index if right_index < heap_size and array[largest] < array[right_index]: SCREAMING_SNAKE_CASE = right_index if largest != index: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = array[largest], array[index] heapify(A__ , A__ , A__ ) def __a ( A__ : list ): SCREAMING_SNAKE_CASE = len(A__ ) for i in range(n // 2 , -1 , -1 ): heapify(A__ , A__ , A__ ) for i in range(n - 1 , 0 , -1 ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = array[0], array[i] heapify(A__ , 0 , A__ ) return array def __a ( A__ : list , A__ : int , A__ : int , A__ : int ): if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def __a ( A__ : list , A__ : int , A__ : int , A__ : int ): SCREAMING_SNAKE_CASE = low SCREAMING_SNAKE_CASE = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = array[j], array[i] i += 1 def __a ( A__ : list ): if len(A__ ) == 0: return array SCREAMING_SNAKE_CASE = 2 * math.ceil(math.loga(len(A__ ) ) ) SCREAMING_SNAKE_CASE = 16 return intro_sort(A__ , 0 , len(A__ ) , A__ , A__ ) def __a ( A__ : list , A__ : int , A__ : int , A__ : int , A__ : int ): while end - start > size_threshold: if max_depth == 0: return heap_sort(A__ ) max_depth -= 1 SCREAMING_SNAKE_CASE = median_of_a(A__ , A__ , start + ((end - start) // 2) + 1 , end - 1 ) SCREAMING_SNAKE_CASE = partition(A__ , A__ , A__ , A__ ) intro_sort(A__ , A__ , A__ , A__ , A__ ) SCREAMING_SNAKE_CASE = p return insertion_sort(A__ , A__ , A__ ) if __name__ == "__main__": import doctest doctest.testmod() __A : Dict = input('Enter numbers separated by a comma : ').strip() __A : Any = [float(item) for item in user_input.split(',')] print(sort(unsorted))
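# Illustrative sketch (added; not part of the original file): the partitioning
# above picks its pivot as the median of the first, middle and last elements,
# which guards quicksort against already-sorted inputs. A self-contained check of
# that selection rule:
def median_of_three(a: float, b: float, c: float) -> float:
    return sorted([a, b, c])[1]

assert median_of_three(4.0, 1.0, 7.0) == 4.0
assert median_of_three(2.0, 9.0, 5.0) == 5.0
# With the pivot chosen this way, intro sort keeps quicksort's average O(n log n)
# behaviour, switches to insertion sort for runs below the size threshold (16),
# and falls back to heap sort once the recursion depth budget is exhausted.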
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ): pass @is_pipeline_test @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @require_torch def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCamelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @require_tf def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, 
"label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @slow @require_torch def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
from ....configuration_utils import PretrainedConfig from ....utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'Visual-Attention-Network/van-base': ( 'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json' ), } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "van" def __init__( self : List[str] , __lowerCamelCase : Union[str, Any]=224 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : List[Any]=[7, 3, 3, 3] , __lowerCamelCase : List[str]=[4, 2, 2, 2] , __lowerCamelCase : List[Any]=[64, 128, 320, 512] , __lowerCamelCase : Optional[Any]=[3, 3, 12, 3] , __lowerCamelCase : List[str]=[8, 8, 4, 4] , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[Any]=1e-6 , __lowerCamelCase : Tuple=1e-2 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : List[Any]=0.0 , **__lowerCamelCase : Dict , ): super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_sizes SCREAMING_SNAKE_CASE = strides SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = mlp_ratios SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = layer_scale_init_value SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = dropout_rate
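# Illustrative sketch (added; not part of the original file): the four stages
# configured above downsample multiplicatively by their strides, so (ignoring
# padding rounding) a 224x224 input leaves the last stage at 224 / (4*2*2*2) = 7x7
# with hidden size 512. A standalone check of that arithmetic:
feature_size, strides = 224, [4, 2, 2, 2]
for stride in strides:
    feature_size //= stride
assert feature_size == 7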
__A : dict[str, float] = { "joule": 1.0, "kilojoule": 1_0_0_0, "megajoule": 1_0_0_0_0_0_0, "gigajoule": 1_0_0_0_0_0_0_0_0_0, "wattsecond": 1.0, "watthour": 3_6_0_0, "kilowatthour": 3_6_0_0_0_0_0, "newtonmeter": 1.0, "calorie_nutr": 4_1_8_6.8, "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0, "electronvolt": 1.6_0217_6634e-19, "britishthermalunit_it": 1_0_5_5.0_5_5_8_5, "footpound": 1.355_818, } def __a ( A__ : str , A__ : str , A__ : float ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: SCREAMING_SNAKE_CASE = ( F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" F"Valid values are: {', '.join(A__ )}" ) raise ValueError(A__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
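# Illustrative sketch (added; not part of the original file): the conversion is a
# ratio of the two units' joule factors. A self-contained check using values from
# the table above:
factors = {"joule": 1.0, "kilojoule": 1_000.0, "kilowatthour": 3_600_000.0}

def convert_energy(from_type: str, to_type: str, value: float) -> float:
    # express the value in joules, then re-express it in the target unit
    return value * factors[from_type] / factors[to_type]

assert convert_energy("kilowatthour", "joule", 1.0) == 3_600_000.0
assert convert_energy("joule", "kilojoule", 500.0) == 0.5
# Unknown units raise a ValueError in the converter above, listing the valid keys.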
import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def __a ( A__ : List[str] , A__ : Optional[Any] , A__ : Union[str, Any] , A__ : List[Any] ): # Initialise PyTorch model SCREAMING_SNAKE_CASE = BigBirdConfig.from_json_file(A__ ) print(F"Building PyTorch model from configuration: {config}" ) if is_trivia_qa: SCREAMING_SNAKE_CASE = BigBirdForQuestionAnswering(A__ ) else: SCREAMING_SNAKE_CASE = BigBirdForPreTraining(A__ ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(A__ , A__ , is_trivia_qa=A__ ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(A__ ) if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--big_bird_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.' ) __A : List[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
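# Illustrative usage note (added; not part of the original script); the script
# name and paths below are placeholders:
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/checkpoint \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --is_trivia_qa
#
# --is_trivia_qa is only passed when converting a checkpoint with a TriviaQA head.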
from collections import deque from .hash_table import HashTable class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Optional[int] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ): super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.values[key] def _snake_case ( self : Union[str, Any] ): return ( sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values ) / self.size_table * self.charge_factor ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None ): if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0 ): return key return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
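# Illustrative sketch (added; not part of the original file): the class above
# resolves collisions by separate chaining, keeping a deque per slot with the
# newest value at the head. A minimal standalone model of that insertion rule:
from collections import deque

slots = [None] * 4

def chain_insert(key: int, value: int) -> None:
    if slots[key] is None:
        slots[key] = deque()
    slots[key].appendleft(value)  # newest value sits at the head of the chain

chain_insert(1, 10)
chain_insert(1, 20)
assert list(slots[1]) == [20, 10]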
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _snake_case ( self : int ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return model @property def _snake_case ( self : Optional[Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , ) return model @property def _snake_case ( self : List[Any] ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , ) SCREAMING_SNAKE_CASE = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , ) return vqvae, unet @slow def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) SCREAMING_SNAKE_CASE = DDPMScheduler() SCREAMING_SNAKE_CASE = AudioDiffusionPipeline(vqvae=__lowerCamelCase , unet=self.dummy_unet , mel=__lowerCamelCase , scheduler=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(42 ) SCREAMING_SNAKE_CASE = pipe(generator=__lowerCamelCase , steps=4 ) SCREAMING_SNAKE_CASE = output.audios[0] SCREAMING_SNAKE_CASE = output.images[0] SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(42 ) SCREAMING_SNAKE_CASE = pipe(generator=__lowerCamelCase , steps=4 , return_dict=__lowerCamelCase ) SCREAMING_SNAKE_CASE = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) SCREAMING_SNAKE_CASE = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] SCREAMING_SNAKE_CASE = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10] SCREAMING_SNAKE_CASE = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 SCREAMING_SNAKE_CASE = Mel( 
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) SCREAMING_SNAKE_CASE = DDIMScheduler() SCREAMING_SNAKE_CASE = self.dummy_vqvae_and_unet SCREAMING_SNAKE_CASE = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__lowerCamelCase , scheduler=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) np.random.seed(0 ) SCREAMING_SNAKE_CASE = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(42 ) SCREAMING_SNAKE_CASE = pipe(raw_audio=__lowerCamelCase , generator=__lowerCamelCase , start_step=5 , steps=10 ) SCREAMING_SNAKE_CASE = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) SCREAMING_SNAKE_CASE = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] SCREAMING_SNAKE_CASE = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 SCREAMING_SNAKE_CASE = self.dummy_unet_condition SCREAMING_SNAKE_CASE = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=__lowerCamelCase , mel=__lowerCamelCase , scheduler=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) np.random.seed(0 ) SCREAMING_SNAKE_CASE = torch.rand((1, 1, 10) ) SCREAMING_SNAKE_CASE = pipe(generator=__lowerCamelCase , encoding=__lowerCamelCase ) SCREAMING_SNAKE_CASE = output.images[0] SCREAMING_SNAKE_CASE = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] SCREAMING_SNAKE_CASE = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = torch_device SCREAMING_SNAKE_CASE = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" ) SCREAMING_SNAKE_CASE = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(42 ) SCREAMING_SNAKE_CASE = pipe(generator=__lowerCamelCase ) SCREAMING_SNAKE_CASE = output.audios[0] SCREAMING_SNAKE_CASE = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] SCREAMING_SNAKE_CASE = np.frombuffer(image.tobytes() , dtype="uint8" )[:10] SCREAMING_SNAKE_CASE = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
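# Illustrative usage sketch (added; not part of the original test file): the slow
# integration test above mirrors ordinary inference. Minimal standalone usage,
# which downloads the public checkpoint used by the test:
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
output = pipe(generator=torch.Generator().manual_seed(42))
spectrogram = output.images[0]  # PIL image of the mel spectrogram
waveform = output.audios[0]     # decoded audio as a numpy array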
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : int = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "gpt_neo" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_layers SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = window_size SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_dropout SCREAMING_SNAKE_CASE = embed_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = attention_types SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) @staticmethod def _snake_case ( __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ): import torch SCREAMING_SNAKE_CASE = input.size() SCREAMING_SNAKE_CASE = len(A__ ) SCREAMING_SNAKE_CASE = shape[dimension] SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ ) SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1 SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE = [slice(A__ )] * rank SCREAMING_SNAKE_CASE = indices SCREAMING_SNAKE_CASE = input[s] SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A__ ) def __a ( A__ : Union[str, Any] , A__ : Optional[int] ): import torch SCREAMING_SNAKE_CASE = torch.arange(1 , A__ ) SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ ) SCREAMING_SNAKE_CASE = remainders == 0 SCREAMING_SNAKE_CASE = candidates[divisor_indices] SCREAMING_SNAKE_CASE = torch.max(A__ ) return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"} else: SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return common_inputs @property def _snake_case ( self : Optional[int] ): return self._config.num_heads def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE = seqlen + 2 SCREAMING_SNAKE_CASE = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE = common_inputs["attention_mask"] if self.use_past: SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self : Optional[int] ): return 13
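# Illustrative sketch (added; not part of the original file): the default
# attention_types value [[["global", "local"], 12]] expands to 24 alternating
# layer types, matching num_layers=24. The static expander above is equivalent to:
def expand_attention_types(attention_types):
    layers = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            layers.extend(pattern)
    return layers

expanded = expand_attention_types([[["global", "local"], 12]])
assert len(expanded) == 24
assert expanded[:4] == ["global", "local", "global", "local"]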
from itertools import product def __a ( A__ : int , A__ : int ): SCREAMING_SNAKE_CASE = sides_number SCREAMING_SNAKE_CASE = max_face_number * dice_number SCREAMING_SNAKE_CASE = [0] * (max_total + 1) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = range(A__ , max_face_number + 1 ) for dice_numbers in product(A__ , repeat=A__ ): SCREAMING_SNAKE_CASE = sum(A__ ) totals_frequencies[total] += 1 return totals_frequencies def __a ( ): SCREAMING_SNAKE_CASE = total_frequency_distribution( sides_number=4 , dice_number=9 ) SCREAMING_SNAKE_CASE = total_frequency_distribution( sides_number=6 , dice_number=6 ) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 9 SCREAMING_SNAKE_CASE = 4 * 9 SCREAMING_SNAKE_CASE = 6 for peter_total in range(A__ , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) SCREAMING_SNAKE_CASE = (4**9) * (6**6) SCREAMING_SNAKE_CASE = peter_wins_count / total_games_number SCREAMING_SNAKE_CASE = round(A__ , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f'{solution() = }')
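# Illustrative sketch (added; not part of the original file): the frequency table
# enumerates every possible roll, so its entries sum to sides**dice_number. A tiny
# standalone check for two 4-sided dice:
from itertools import product

frequencies = [0] * (2 * 4 + 1)
for roll in product(range(1, 5), repeat=2):
    frequencies[sum(roll)] += 1
assert sum(frequencies) == 4**2
assert frequencies[5] == 4  # (1,4), (2,3), (3,2), (4,1)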
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __A : Any = { 'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = [ 'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongT5EncoderModel', 'LongT5ForConditionalGeneration', 'LongT5Model', 'LongT5PreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'FlaxLongT5ForConditionalGeneration', 'FlaxLongT5Model', 'FlaxLongT5PreTrainedModel', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
1
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A : int = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=5_1_2, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def __a ( A__ : List[Any] ): if string == "True": return True elif string == "False": return False else: raise ValueError(F"could not parse string as bool {string}" ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A : Union[str, Any] = parser.parse_args() __A : int = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
698
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert the phase angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
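A short usage sketch with illustrative values (assumed, not from the source): a 100 V source at +30 degrees driving a 5 A load at -30 degrees, so the two phase angles cancel in the product.

s = apparent_power(voltage=100, current=5, voltage_angle=30, current_angle=-30)
magnitude, phase_rad = cmath.polar(s)
print(s)                                   # approximately (500+0j) volt-amperes
print(magnitude, math.degrees(phase_rad))  # ~500.0 VA at ~0.0 degrees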
698
1
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): SCREAMING_SNAKE_CASE = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = "sgugger/tiny-distilbert-classification" SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) # set architectures equal to `None` SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = "sshleifer/tinier_bart" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = "sshleifer/tinier_bart" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase , configs=[config] ) SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv" ) , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv" ) , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv" ) , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv" ) , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv" ) ).exists() ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase : str ): self.assertTrue(hasattr(__lowerCamelCase , "sequential" ) ) self.assertTrue(hasattr(__lowerCamelCase , "cumulative" ) ) self.assertTrue(hasattr(__lowerCamelCase , "current" ) ) self.assertTrue(hasattr(__lowerCamelCase , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt" ) , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = PyTorchBenchmark(__lowerCamelCase ) SCREAMING_SNAKE_CASE = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt" ) ).exists() )
698
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __a ( A__ : List[str] ): SCREAMING_SNAKE_CASE = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ ) SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def __a ( A__ : Tuple , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = {} for old_key in state_dict.keys(): SCREAMING_SNAKE_CASE = old_key if "moe_layer.experts." in key: if expert_idx is not None: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" ) else: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: SCREAMING_SNAKE_CASE = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: SCREAMING_SNAKE_CASE = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("final_layer_norm" , "ff_layer_norm" ) SCREAMING_SNAKE_CASE = state_dict[old_key] return new_dict def __a ( A__ : List[str] , A__ : List[Any] , A__ : str , A__ : Union[str, Any] , A__ : str = WEIGHTS_NAME ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 os.makedirs(A__ , exist_ok=A__ ) for expert in range(A__ ): SCREAMING_SNAKE_CASE = switch_checkpoint_path + F"-rank-{expert}.pt" if os.path.isfile(A__ ): SCREAMING_SNAKE_CASE = torch.load(A__ )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = os.path.join( A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) torch.save(A__ , A__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(A__ )[0]].dtype ) # Add the last block SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) SCREAMING_SNAKE_CASE = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(A__ ) == 1: SCREAMING_SNAKE_CASE = os.path.join(A__ , A__ ) torch.save(A__ , A__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(A__ , A__ ) # Otherwise, let's build the index SCREAMING_SNAKE_CASE = {} for idx, shard in enumerate(A__ ): SCREAMING_SNAKE_CASE = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(A__ ):05d}.bin" ) SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , 
F"-{idx+1:05d}-of-???.bin" ) ) os.rename(A__ , os.path.join(A__ , A__ ) ) for key in shard: SCREAMING_SNAKE_CASE = shard_file # Add the metadata SCREAMING_SNAKE_CASE = {"total_size": total_size} SCREAMING_SNAKE_CASE = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE = json.dumps(A__ , indent=2 , sort_keys=A__ ) + "\n" f.write(A__ ) return metadata, index if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) __A : Optional[int] = parser.parse_args() __A , __A : Union[str, Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) __A : Any = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) __A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
698
1
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __A : int = logging.getLogger(__name__) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "token-classification" def __init__( self : Dict , __lowerCamelCase : List[Any] ): if type(__lowerCamelCase ) == dict: SCREAMING_SNAKE_CASE = Namespace(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = import_module("tasks" ) try: SCREAMING_SNAKE_CASE = getattr(__lowerCamelCase , hparams.task_type ) SCREAMING_SNAKE_CASE = token_classification_task_clazz() except AttributeError: raise ValueError( f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. " f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" ) SCREAMING_SNAKE_CASE = self.token_classification_task.get_labels(hparams.labels ) SCREAMING_SNAKE_CASE = CrossEntropyLoss().ignore_index super().__init__(__lowerCamelCase , len(self.labels ) , self.mode ) def _snake_case ( self : List[Any] , **__lowerCamelCase : Any ): return self.model(**__lowerCamelCase ) def _snake_case ( self : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": SCREAMING_SNAKE_CASE = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids SCREAMING_SNAKE_CASE = self(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = self.hparams for mode in ["train", "dev", "test"]: SCREAMING_SNAKE_CASE = self._feature_file(__lowerCamelCase ) if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) SCREAMING_SNAKE_CASE = self.token_classification_task.read_examples_from_file(args.data_dir , __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.token_classification_task.convert_examples_to_features( __lowerCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowerCamelCase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("Saving features into cached file %s" , __lowerCamelCase ) torch.save(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool = False ): SCREAMING_SNAKE_CASE = self._feature_file(__lowerCamelCase ) logger.info("Loading features from cached file %s" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = 
torch.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) SCREAMING_SNAKE_CASE = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: SCREAMING_SNAKE_CASE = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: SCREAMING_SNAKE_CASE = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) SCREAMING_SNAKE_CASE = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , batch_size=__lowerCamelCase ) def _snake_case ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ): """Compute validation""" "" SCREAMING_SNAKE_CASE = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": SCREAMING_SNAKE_CASE = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids SCREAMING_SNAKE_CASE = self(**__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs[:2] SCREAMING_SNAKE_CASE = logits.detach().cpu().numpy() SCREAMING_SNAKE_CASE = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _snake_case ( self : Dict , __lowerCamelCase : Tuple ): SCREAMING_SNAKE_CASE = torch.stack([x["val_loss"] for x in outputs] ).mean() SCREAMING_SNAKE_CASE = np.concatenate([x["pred"] for x in outputs] , axis=0 ) SCREAMING_SNAKE_CASE = np.argmax(__lowerCamelCase , axis=2 ) SCREAMING_SNAKE_CASE = np.concatenate([x["target"] for x in outputs] , axis=0 ) SCREAMING_SNAKE_CASE = dict(enumerate(self.labels ) ) SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )] SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) SCREAMING_SNAKE_CASE = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(__lowerCamelCase , __lowerCamelCase ), "precision": precision_score(__lowerCamelCase , __lowerCamelCase ), "recall": recall_score(__lowerCamelCase , __lowerCamelCase ), "f1": fa_score(__lowerCamelCase , __lowerCamelCase ), } SCREAMING_SNAKE_CASE = dict(results.items() ) SCREAMING_SNAKE_CASE = results return ret, preds_list, out_label_list def _snake_case ( self : List[str] , __lowerCamelCase : int ): # when stable SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._eval_end(__lowerCamelCase ) SCREAMING_SNAKE_CASE = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple ): # updating to test_epoch_end instead of deprecated test_end SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._eval_end(__lowerCamelCase ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 SCREAMING_SNAKE_CASE = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _snake_case ( __lowerCamelCase : List[str] , 
__lowerCamelCase : str ): # Add NER specific options BaseTransformer.add_model_specific_args(__lowerCamelCase , __lowerCamelCase ) parser.add_argument( "--task_type" , default="NER" , type=__lowerCamelCase , help="Task type to fine tune in training (e.g. NER, POS, etc)" ) parser.add_argument( "--max_seq_length" , default=128 , type=__lowerCamelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--labels" , default="" , type=__lowerCamelCase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , ) parser.add_argument( "--gpus" , default=0 , type=__lowerCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser if __name__ == "__main__": __A : Optional[Any] = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __A : Union[str, Any] = NERTransformer.add_model_specific_args(parser, os.getcwd()) __A : Optional[Any] = parser.parse_args() __A : Optional[int] = NERTransformer(args) __A : int = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __A : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True)) __A : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
698
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = BlipImageProcessor() SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Dict , **__lowerCamelCase : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def _snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" ) SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = 
BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
698
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = BlipImageProcessor() SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Dict , **__lowerCamelCase : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def _snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" ) SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = 
BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
698
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ): pass def __a ( A__ : str ): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A : Tuple = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) SCREAMING_SNAKE_CASE = "What is the placebo?" SCREAMING_SNAKE_CASE = [ { "image": load_image(__lowerCamelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 ) self.assertEqual( __lowerCamelCase , [ [ {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "How many cats are there?" SCREAMING_SNAKE_CASE = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) # We can optionnally pass directly the words and bounding boxes SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _snake_case ( self : List[Any] ): pass
698
1
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCamelCase ): SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text
698
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
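A hedged usage sketch for the tool above: the sentence and label set are illustrative, and the first call downloads the facebook/bart-large-mnli checkpoint, which is large. `PipelineTool.__call__` routes positional and keyword arguments through `encode`, so the labels can be passed directly.

tool = TextClassificationTool()
result = tool(
    "This new laptop is fast and the battery lasts all day.",
    labels=["positive", "negative"],
)
print(result)  # expected to be "positive" for this clearly positive sentence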
698
1
import argparse
import hashlib  # hashlib is only used for verification in the test below
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
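A quick way to exercise the class above against the standard library (the sample byte string is arbitrary); both lines should print the same digest.

sample = b"hello world"
print(SHA1Hash(sample).final_hash())     # 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
print(hashlib.sha1(sample).hexdigest())  # same digest from the reference implementation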
698
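A quick cross-check of the pure-Python implementation above against hashlib; a minimal sketch, assuming the class is importable as `sha1.SHA1Hash` (the module path is hypothetical).

import hashlib

from sha1 import SHA1Hash  # hypothetical module path for the class defined above

for message in (b"", b"abc", b"Test String"):
    ours = SHA1Hash(message).final_hash()
    reference = hashlib.sha1(message).hexdigest()  # noqa: S324 (reference value only)
    assert ours == reference, (message, ours, reference)
print("digests match hashlib for all sample inputs")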
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
698
1
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
698
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Optional[int] = logging.get_logger(__name__) __A : List[str] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Tuple = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Dict = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : Dict ): return len(self.encoder ) def _snake_case ( self : int ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : str , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : List[str] , __lowerCamelCase : str ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ): return 
self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
698
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ): pass @is_pipeline_test @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @require_torch def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCamelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @require_tf def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, 
"label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @slow @require_torch def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
698
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
698
1
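A short worked check of quadratic_roots; a sketch, assuming the function above is importable as `quadratic.quadratic_roots` (the module name is an assumption).

from quadratic import quadratic_roots  # hypothetical module name

# 5x^2 + 6x + 1 = (5x + 1)(x + 1): discriminant 36 - 20 = 16, roots (-6 +/- 4) / 10
assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)

# A negative discriminant yields purely complex conjugate roots
root_1, root_2 = quadratic_roots(a=1, b=0, c=1)
assert (root_1, root_2) == (1j, -1j)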
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
698
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCamelCase ): SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text
698
1
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ): pass def __a ( A__ : str ): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A : Tuple = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) SCREAMING_SNAKE_CASE = "What is the placebo?" SCREAMING_SNAKE_CASE = [ { "image": load_image(__lowerCamelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 ) self.assertEqual( __lowerCamelCase , [ [ {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "How many cats are there?" SCREAMING_SNAKE_CASE = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) # We can optionnally pass directly the words and bounding boxes SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _snake_case ( self : List[Any] ): pass
698
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
1
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "new-model" if is_tf_available(): class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = NewModelConfig @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "bert-base-cased" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = "bert-base-cased" SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : Optional[Any] ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : Union[str, Any] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = 
AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : str ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : str ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : int ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self : Optional[int] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) @slow @require_tensorflow_probability def _snake_case ( self : int ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained( __lowerCamelCase , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 ) def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = 
TFAutoModelWithLMHead.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCamelCase ) , 14410 ) def _snake_case ( self : List[str] ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = copy.deepcopy(model.config ) SCREAMING_SNAKE_CASE = ["FunnelBaseModel"] SCREAMING_SNAKE_CASE = TFAutoModel.from_config(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] ): try: AutoConfig.register("new-model" , __lowerCamelCase ) SCREAMING_SNAKE_CASE = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(__lowerCamelCase ): auto_class.register(__lowerCamelCase , __lowerCamelCase ) auto_class.register(__lowerCamelCase , __lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase ): auto_class.register(__lowerCamelCase , __lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config() SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() ) SCREAMING_SNAKE_CASE = auto_class.from_config(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = auto_class.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def _snake_case ( self : Union[str, Any] ): with self.assertRaisesRegex( __lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" ) def _snake_case ( self : List[Any] ): with self.assertRaisesRegex( __lowerCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(__lowerCamelCase , revision="aaaaaa" ) def _snake_case ( self : Dict ): with self.assertRaisesRegex( __lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def _snake_case ( self : List[str] ): with 
self.assertRaisesRegex(__lowerCamelCase , "Use `from_pt=True` to load this model" ): SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" ) def _snake_case ( self : Union[str, Any] ): # Make sure we have cached the model. SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) with RequestCounter() as counter: SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
698
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
698
1
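A rough way to gauge the classifier above on the held-out split; a sketch, assuming the file is importable as `knn` (the module name is an assumption).

from knn import X_test, X_train, classes, classifier, y_test, y_train  # hypothetical module name

correct = sum(
    classifier(X_train, y_train, classes, point) == classes[label]
    for point, label in zip(X_test, y_test)
)
print(f"held-out accuracy: {correct / len(y_test):.2%}")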
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
# Expected output of all three functions below for `arr`
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
698
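A small equivalence check across the three implementations above; a sketch, assuming they live in a module named `next_greatest_element` (an assumption).

from next_greatest_element import (  # hypothetical module name
    next_greatest_element,
    next_greatest_element_fast,
    next_greatest_element_slow,
)

sample = [11, 13, 21, 3]
expected = [13, 21, -1, -1]  # 21 and the final 3 have no greater element to their right

assert next_greatest_element_slow(sample) == expected
assert next_greatest_element_fast(sample) == expected
assert next_greatest_element(sample) == expected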
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
698
1
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
698
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __A : str = logging.get_logger(__name__) __A : Optional[Any] = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "imagegpt" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[str] , __lowerCamelCase : Any=512 + 1 , __lowerCamelCase : str=32 * 32 , __lowerCamelCase : Any=512 , __lowerCamelCase : Optional[int]=24 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]="quick_gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=1e-5 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : int=False , **__lowerCamelCase : Union[str, Any] , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = n_positions SCREAMING_SNAKE_CASE = n_embd SCREAMING_SNAKE_CASE = n_layer SCREAMING_SNAKE_CASE = n_head SCREAMING_SNAKE_CASE = n_inner SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_pdrop SCREAMING_SNAKE_CASE = embd_pdrop SCREAMING_SNAKE_CASE = attn_pdrop SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scale_attn_weights SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE = reorder_and_upcast_attn SCREAMING_SNAKE_CASE = tie_word_embeddings super().__init__(tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def _snake_case ( self : Optional[int] , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , ): SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return inputs
698
1
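A hedged variation on the Kadane scan in the sample above: the same single pass can also report where the best window starts and ends. The function name and return convention here are illustrative choices:

def max_subarray_span(nums: list[int]) -> tuple[int, int, int]:
    # Returns (best_sum, start, end), with end inclusive.
    if not nums:
        raise ValueError("Input sequence should not be empty")
    best_sum, best_start, best_end = nums[0], 0, 0
    cur_sum, cur_start = nums[0], 0
    for i in range(1, len(nums)):
        if cur_sum < 0:  # restarting beats extending a negative prefix
            cur_sum, cur_start = nums[i], i
        else:
            cur_sum += nums[i]
        if cur_sum > best_sum:
            best_sum, best_start, best_end = cur_sum, cur_start, i
    return best_sum, best_start, best_end


assert max_subarray_span([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == (6, 3, 6)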
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A : str = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @slow @require_torch def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" ) SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size SCREAMING_SNAKE_CASE = tokenizer.sep_token_id SCREAMING_SNAKE_CASE = tokenizer.cls_token_id SCREAMING_SNAKE_CASE = 128 SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) ) SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) ) SCREAMING_SNAKE_CASE = 4 def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ): # Tokenizer will automatically set [BOS] <text> [EOS] SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 ) SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 ) SCREAMING_SNAKE_CASE = inputs.input_ids SCREAMING_SNAKE_CASE = inputs.attention_mask SCREAMING_SNAKE_CASE = outputs.input_ids SCREAMING_SNAKE_CASE = outputs.input_ids.copy() SCREAMING_SNAKE_CASE = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] SCREAMING_SNAKE_CASE = outputs.attention_mask assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids ) assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = pred.label_ids SCREAMING_SNAKE_CASE = pred.predictions # all unnecessary tokens are removed SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase ) return {"accuracy": accuracy} # map train dataset SCREAMING_SNAKE_CASE = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset SCREAMING_SNAKE_CASE = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments( output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , 
warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer SCREAMING_SNAKE_CASE = SeqaSeqTrainer( model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , ) # start training trainer.train()
698
1
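The _LazyModule indirection in the GroupViT sample above exists so that heavy torch/TensorFlow imports only happen when an attribute is first touched. A generic sketch of the same deferred-import idea using only the standard library (class and module names are made up for illustration):

import importlib
import types


class LazyModule(types.ModuleType):
    # Maps a public attribute name -> the submodule that actually defines it.
    def __init__(self, name: str, attr_to_submodule: dict[str, str]):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr: str):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(submodule)  # imported on first access only
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# Hypothetical usage: json.JSONDecoder is only imported when first touched.
lazy = LazyModule("lazy_stdlib", {"JSONDecoder": "json"})
decoder = lazy.JSONDecoder()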
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __A : Optional[int] = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(3_2, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_2_8, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __A : Dict = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5) __A : Union[str, Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary' ) __A : Dict = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(6_4, 6_4), batch_size=3_2, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions __A : Any = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(6_4, 6_4) ) __A : Tuple = tf.keras.preprocessing.image.img_to_array(test_image) __A : Tuple = np.expand_dims(test_image, axis=0) __A : Optional[int] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __A : Any = 'Normal' if result[0][0] == 1: __A : Optional[Any] = 'Abnormality detected'
698
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , "num_attention_heads" ) ) class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ): SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = kernel_size SCREAMING_SNAKE_CASE = stride SCREAMING_SNAKE_CASE = padding SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = initializer_range def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _snake_case ( self : List[str] ): return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , 
drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = self.num_labels SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowerCamelCase__ = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = LevitModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _snake_case ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : Any ): return @unittest.skip(reason="Levit does not use inputs_embeds" ) def _snake_case ( self : Union[str, Any] ): pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def _snake_case ( self : Tuple ): pass @unittest.skip(reason="Levit does not output attentions" ) def _snake_case ( self : Tuple ): pass def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _snake_case ( self : List[Any] ): def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) SCREAMING_SNAKE_CASE = outputs.hidden_states SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1] for _ in range(4 ): SCREAMING_SNAKE_CASE = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) SCREAMING_SNAKE_CASE = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _snake_case ( self : int ): pass def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ): SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def _snake_case ( self : List[Any] ): if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__lowerCamelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True for model_class in self.all_model_classes: if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss loss.backward() def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__lowerCamelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ): SCREAMING_SNAKE_CASE = problem_type["title"] SCREAMING_SNAKE_CASE = problem_type["num_labels"] SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] 
) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list: SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def _snake_case ( self : List[Any] ): for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __a ( ): SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Dict ): return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __lowerCamelCase ) SCREAMING_SNAKE_CASE = self.default_image_processor SCREAMING_SNAKE_CASE = prepare_img() SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
698
1
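The Levit model tester in the row above repeatedly evaluates floor((size + 2 * padding - kernel) / stride) + 1 by hand to predict feature-map sizes. A small helper capturing that formula, as a sketch with names of my choosing:

from math import floor


def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # Standard output-size formula for a convolution or pooling layer.
    return floor((size + 2 * padding - kernel) / stride) + 1


# Four stride-2, kernel-3, padding-1 convolutions shrink a 64px input to 4px,
# matching the repeated halving in the model tester above.
size = 64
for _ in range(4):
    size = conv_output_size(size, kernel=3, stride=2, padding=1)
assert size == 4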
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __A : Optional[int] = logging.get_logger(__name__) def __a ( A__ : Any ): SCREAMING_SNAKE_CASE = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: SCREAMING_SNAKE_CASE = 128 elif "12-12" in model_name: SCREAMING_SNAKE_CASE = 12 SCREAMING_SNAKE_CASE = 12 elif "14-14" in model_name: SCREAMING_SNAKE_CASE = 14 SCREAMING_SNAKE_CASE = 14 elif "16-16" in model_name: SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 16 else: raise ValueError("Model not supported" ) SCREAMING_SNAKE_CASE = "huggingface/label-files" if "speech-commands" in model_name: SCREAMING_SNAKE_CASE = 35 SCREAMING_SNAKE_CASE = "speech-commands-v2-id2label.json" else: SCREAMING_SNAKE_CASE = 527 SCREAMING_SNAKE_CASE = "audioset-id2label.json" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __a ( A__ : Optional[Any] ): if "module.v" in name: SCREAMING_SNAKE_CASE = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: SCREAMING_SNAKE_CASE = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: SCREAMING_SNAKE_CASE = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: SCREAMING_SNAKE_CASE = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: SCREAMING_SNAKE_CASE = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: SCREAMING_SNAKE_CASE = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: SCREAMING_SNAKE_CASE = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def __a ( A__ : Optional[int] , A__ : Optional[int] ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(A__ ) if "qkv" in key: SCREAMING_SNAKE_CASE = key.split("." 
) SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = config.hidden_size if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __a ( A__ : int ): SCREAMING_SNAKE_CASE = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) @torch.no_grad() def __a ( A__ : Union[str, Any] , A__ : List[Any] , A__ : Optional[Any]=False ): SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(A__ ) SCREAMING_SNAKE_CASE = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict SCREAMING_SNAKE_CASE = model_name_to_url[model_name] SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(A__ , map_location="cpu" ) # remove some keys remove_keys(A__ ) # rename some keys SCREAMING_SNAKE_CASE = convert_state_dict(A__ , A__ ) # load 🤗 model SCREAMING_SNAKE_CASE = ASTForAudioClassification(A__ ) model.eval() model.load_state_dict(A__ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 SCREAMING_SNAKE_CASE = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 SCREAMING_SNAKE_CASE = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 SCREAMING_SNAKE_CASE = 1024 if "speech-commands" not in model_name else 128 SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=A__ , std=A__ , max_length=A__ ) if "speech-commands" in model_name: SCREAMING_SNAKE_CASE = load_dataset("speech_commands" , "v0.02" , split="validation" ) SCREAMING_SNAKE_CASE = dataset[0]["audio"]["array"] else: SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torchaudio.load(A__ ) SCREAMING_SNAKE_CASE = waveform.squeeze().numpy() SCREAMING_SNAKE_CASE = feature_extractor(A__ , sampling_rate=16000 , return_tensors="pt" ) # forward pass SCREAMING_SNAKE_CASE = model(**A__ ) SCREAMING_SNAKE_CASE = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": SCREAMING_SNAKE_CASE = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": SCREAMING_SNAKE_CASE = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == 
"ast-finetuned-audioset-10-10-0.448": SCREAMING_SNAKE_CASE = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": SCREAMING_SNAKE_CASE = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": SCREAMING_SNAKE_CASE = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": SCREAMING_SNAKE_CASE = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": SCREAMING_SNAKE_CASE = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": SCREAMING_SNAKE_CASE = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A__ , atol=1E-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A__ ).mkdir(exist_ok=A__ ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(A__ ) print(F"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(A__ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F"MIT/{model_name}" ) feature_extractor.push_to_hub(F"MIT/{model_name}" ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __A : Tuple = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
698
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): return None class _SCREAMING_SNAKE_CASE : '''simple docstring''' def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ): return None class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : List[Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) @require_torch @slow def _snake_case ( self : Optional[int] ): from transformers import BertModel SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(__lowerCamelCase ) ) vocab_file.flush() SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) ) model.save_pretrained(__lowerCamelCase ) self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase ) @require_tf @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def _snake_case ( self : Any ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ): try: # Compute path with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) return path except Exception as e: 
self.fail(__lowerCamelCase ) @require_torch @require_tokenizers @slow def _snake_case ( self : Dict ): from transformers import BertModel SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" ) @require_tf @require_tokenizers @slow def _snake_case ( self : int ): from transformers import TFBertModel SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" ) def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase ) # Assert all variables are present self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase ) self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def _snake_case ( self : Optional[Any] ): SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"] SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__lowerCamelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__lowerCamelCase ) , 1 ) self.assertEqual(len(__lowerCamelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
698
1
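The AST converter above slices each fused qkv projection into separate query/key/value tensors. A self-contained sketch of that slicing pattern; the hidden size is an arbitrary illustrative value:

import torch

hidden = 8  # hypothetical hidden size
fused_qkv = torch.randn(3 * hidden, hidden)  # rows: [q; k; v] stacked

# Same slicing pattern as convert_state_dict above.
q = fused_qkv[:hidden, :]
k = fused_qkv[hidden : hidden * 2, :]
v = fused_qkv[-hidden:, :]

assert q.shape == k.shape == v.shape == (hidden, hidden)
assert torch.equal(torch.cat([q, k, v], dim=0), fused_qkv)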
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # For each arr value, a sum of zero(0) can be formed by not taking any
    # element, hence True.
    for i in range(arr_len + 1):
        subset[i][0] = True

    # A non-zero sum from an empty set is impossible, hence False.
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
698
from manim import * class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): rect.set_stroke(__lowerCamelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 ) self.add(__lowerCamelCase ) cpu_targs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 ) SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) SCREAMING_SNAKE_CASE = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) SCREAMING_SNAKE_CASE = MarkupText( f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) ) self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__lowerCamelCase ): SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 ) target.move_to(__lowerCamelCase ) first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) ) SCREAMING_SNAKE_CASE = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) ) self.play(*__lowerCamelCase ) self.play(*__lowerCamelCase ) self.wait()
698
1
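A space-saving variant of the subset-sum table in the sample above: since row i only reads row i - 1, a single boolean array suffices when sums are updated from high to low. A sketch under that assumption; the function name is mine:

def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True  # the empty subset always sums to zero
    for value in arr:
        # Iterate downwards so each value is used at most once.
        for total in range(required_sum, value - 1, -1):
            reachable[total] = reachable[total] or reachable[total - value]
    return reachable[required_sum]


assert is_sum_subset_1d([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset_1d([3, 34, 4, 12, 5, 2], 30) is False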
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
698
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ): pass @is_pipeline_test @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @require_torch def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowerCamelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @require_tf def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, 
"label": ANY(__lowerCamelCase )}, ], [ {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, {"score": 0.333, "label": ANY(__lowerCamelCase )}, ], ] , ) @slow @require_torch def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
698
1
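For context on the pipeline tests above, a minimal end-to-end usage sketch of zero-shot image classification. It assumes network access to download the CLIP checkpoint, and the image path is illustrative:

from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)

# Any RGB image path or PIL.Image works; this path is hypothetical.
predictions = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
for pred in predictions:
    print(f"{pred['label']}: {pred['score']:.3f}")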
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING __A : List[Any] = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : List[str] ): super().__init__(*__lowerCamelCase , **__lowerCamelCase ) requires_backends(self , "vision" ) self.check_model_type(__lowerCamelCase ) def __call__( self : List[Any] , __lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCamelCase : Union[str, Any] ): return super().__call__(__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : List[str] , **__lowerCamelCase : int ): return {}, {}, {} def _snake_case ( self : Tuple , __lowerCamelCase : Any ): SCREAMING_SNAKE_CASE = load_image(__lowerCamelCase ) SCREAMING_SNAKE_CASE = image.size SCREAMING_SNAKE_CASE = self.image_processor(images=__lowerCamelCase , return_tensors=self.framework ) return model_inputs def _snake_case ( self : str , __lowerCamelCase : List[str] ): SCREAMING_SNAKE_CASE = self.model(**__lowerCamelCase ) return model_outputs def _snake_case ( self : Optional[Any] , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = model_outputs.predicted_depth SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__lowerCamelCase ) SCREAMING_SNAKE_CASE = prediction.squeeze().cpu().numpy() SCREAMING_SNAKE_CASE = (output * 255 / np.max(__lowerCamelCase )).astype("uint8" ) SCREAMING_SNAKE_CASE = Image.fromarray(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = predicted_depth SCREAMING_SNAKE_CASE = depth return output_dict
698
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between energy units, using joules as the pivot."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
698
1
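A short usage sketch for the depth-estimation pipeline defined in the sample above. The model id is one commonly used for this task rather than anything mandated by the code, and the image path is illustrative:

from transformers import pipeline

depth_estimator = pipeline(task="depth-estimation", model="Intel/dpt-large")

result = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
# As in the postprocess step above, the output carries both the raw tensor
# and a rescaled uint8 PIL image.
result["depth"].save("depth_map.png")
print(result["predicted_depth"].shape)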
import argparse import os import re import packaging.version __A : str = 'examples/' __A : Union[str, Any] = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } __A : Optional[Any] = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } __A : Union[str, Any] = 'README.md' def __a ( A__ : str , A__ : int , A__ : List[str] ): with open(A__ , "r" , encoding="utf-8" , newline="\n" ) as f: SCREAMING_SNAKE_CASE = f.read() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE = replace.replace("VERSION" , A__ ) SCREAMING_SNAKE_CASE = re_pattern.sub(A__ , A__ ) with open(A__ , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(A__ ) def __a ( A__ : Optional[int] ): for folder, directories, fnames in os.walk(A__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(A__ , A__ ) , A__ , pattern="examples" ) def __a ( A__ : int , A__ : str=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(A__ , A__ , A__ ) if not patch: update_version_in_examples(A__ ) def __a ( ): SCREAMING_SNAKE_CASE = "🤗 Transformers currently provides the following architectures" SCREAMING_SNAKE_CASE = "1. Want to contribute a new model?" with open(A__ , "r" , encoding="utf-8" , newline="\n" ) as f: SCREAMING_SNAKE_CASE = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): SCREAMING_SNAKE_CASE = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , ) index += 1 with open(A__ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(A__ ) def __a ( ): with open(REPLACE_FILES["init"] , "r" ) as f: SCREAMING_SNAKE_CASE = f.read() SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["init"][0].search(A__ ).groups()[0] return packaging.version.parse(A__ ) def __a ( A__ : Union[str, Any]=False ): SCREAMING_SNAKE_CASE = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE = default_version.base_version elif patch: SCREAMING_SNAKE_CASE = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: SCREAMING_SNAKE_CASE = F"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE = input(F"Which version are you releasing? [{default_version}]" ) if len(A__ ) == 0: SCREAMING_SNAKE_CASE = default_version print(F"Updating version to {version}." ) global_version_update(A__ , patch=A__ ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." 
) clean_main_ref_in_model_list() def __a ( ): SCREAMING_SNAKE_CASE = get_version() SCREAMING_SNAKE_CASE = F"{current_version.major}.{current_version.minor + 1}.0.dev0" SCREAMING_SNAKE_CASE = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE = input(F"Which version are we developing now? [{dev_version}]" ) if len(A__ ) == 0: SCREAMING_SNAKE_CASE = dev_version print(F"Updating version to {version}." ) global_version_update(A__ ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') __A : List[Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque so that colliding keys chain their values
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
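# A hedged usage sketch: the constructor and insert_data() come from the
# sibling .hash_table module, and their exact signatures are assumed here.
# table = HashTableWithLinkedList(3)
# for value in (10, 20, 30, 40):
#     table.insert_data(value)
# print(table.values)  # each occupied slot holds a deque of chained values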
def simplify(current_set: list[list]) -> list[list]:
    """
    Normalize each row by its leading coefficient, subtract the first row from
    the others to cancel the leading term, and recurse on the sub-system.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(next_iteration)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations given as n lists of length n + 1.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Move one row without zeros to the front, if any row contains a zero
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions = []
    # Back-substitution over the triangularized system
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
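# Worked trace of the helpers above on the 2x3 system x + 2y = 3, 4x + 5y = 6:
# simplify() normalizes and subtracts rows, and back-substitution then gives
# x = -1, y = 2.
print(simplify([[1, 2, 3], [4, 5, 6]]))  # [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
print(solve_simultaneous([[1, 2, 3], [4, 5, 6]]))  # [-1.0, 2.0]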
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __A : Optional[int] = logging.get_logger(__name__) __A : int = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "gpt_neo" lowerCamelCase__ = ["past_key_values"] lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ): SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_layers SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = window_size SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_dropout SCREAMING_SNAKE_CASE = embed_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = attention_types SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) @staticmethod def _snake_case ( __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ): import torch SCREAMING_SNAKE_CASE = input.size() SCREAMING_SNAKE_CASE = len(A__ ) SCREAMING_SNAKE_CASE = shape[dimension] SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ ) SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1 SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE = [slice(A__ )] * rank SCREAMING_SNAKE_CASE = indices SCREAMING_SNAKE_CASE = input[s] SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A__ ) def __a ( A__ : Union[str, Any] , A__ : Optional[int] ): import torch SCREAMING_SNAKE_CASE = torch.arange(1 , A__ ) SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ ) SCREAMING_SNAKE_CASE = remainders == 0 SCREAMING_SNAKE_CASE = candidates[divisor_indices] SCREAMING_SNAKE_CASE = torch.max(A__ ) return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"} else: SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return common_inputs @property def _snake_case ( self : Optional[int] ): return self._config.num_heads def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE = seqlen + 2 SCREAMING_SNAKE_CASE = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE = common_inputs["attention_mask"] if self.use_past: SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self : Optional[int] ): return 13
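# A minimal sketch of the validation performed in the constructor above:
# attention_types=[[["global", "local"], 2]] expands to a 4-entry pattern, so
# num_layers must be 4. The argument values here are illustrative only.
from transformers import GPTNeoConfig

config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
assert config.attention_layers == ["global", "local", "global", "local"]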
from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = ["image_processor"] lowerCamelCase__ = "SamImageProcessor" def __init__( self : List[Any] , __lowerCamelCase : int ): super().__init__(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.image_processor SCREAMING_SNAKE_CASE = -10 SCREAMING_SNAKE_CASE = self.image_processor.size["longest_edge"] def __call__( self : Any , __lowerCamelCase : List[str]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : str , ): SCREAMING_SNAKE_CASE = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) # pop arguments that are not used in the foward but used nevertheless SCREAMING_SNAKE_CASE = encoding_image_processor["original_sizes"] if hasattr(__lowerCamelCase , "numpy" ): # Checks if Torch or TF tensor SCREAMING_SNAKE_CASE = original_sizes.numpy() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._check_and_preprocess_points( input_points=__lowerCamelCase , input_labels=__lowerCamelCase , input_boxes=__lowerCamelCase , ) SCREAMING_SNAKE_CASE = self._normalize_and_convert( __lowerCamelCase , __lowerCamelCase , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , input_boxes=__lowerCamelCase , return_tensors=__lowerCamelCase , ) return encoding_image_processor def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]="pt" , ): if input_points is not None: if len(__lowerCamelCase ) != len(__lowerCamelCase ): SCREAMING_SNAKE_CASE = [ self._normalize_coordinates(self.target_size , __lowerCamelCase , original_sizes[0] ) for point in input_points ] else: SCREAMING_SNAKE_CASE = [ self._normalize_coordinates(self.target_size , __lowerCamelCase , __lowerCamelCase ) for point, original_size in zip(__lowerCamelCase , __lowerCamelCase ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._pad_points_and_labels(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = np.array(__lowerCamelCase ) if input_labels is not None: SCREAMING_SNAKE_CASE = np.array(__lowerCamelCase ) if input_boxes is not None: if len(__lowerCamelCase ) != len(__lowerCamelCase ): SCREAMING_SNAKE_CASE = [ self._normalize_coordinates(self.target_size , __lowerCamelCase , original_sizes[0] , is_bounding_box=__lowerCamelCase ) for box in input_boxes ] else: SCREAMING_SNAKE_CASE = [ self._normalize_coordinates(self.target_size , __lowerCamelCase , __lowerCamelCase , is_bounding_box=__lowerCamelCase ) for box, original_size in zip(__lowerCamelCase , __lowerCamelCase ) ] SCREAMING_SNAKE_CASE = np.array(__lowerCamelCase ) if input_boxes is not None: if return_tensors == "pt": SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ) # boxes batch size of 1 by default SCREAMING_SNAKE_CASE = 
input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": SCREAMING_SNAKE_CASE = tf.convert_to_tensor(__lowerCamelCase ) # boxes batch size of 1 by default SCREAMING_SNAKE_CASE = tf.expand_dims(__lowerCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"input_boxes": input_boxes} ) if input_points is not None: if return_tensors == "pt": SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": SCREAMING_SNAKE_CASE = tf.convert_to_tensor(__lowerCamelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE = tf.expand_dims(__lowerCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"input_points": input_points} ) if input_labels is not None: if return_tensors == "pt": SCREAMING_SNAKE_CASE = torch.from_numpy(__lowerCamelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": SCREAMING_SNAKE_CASE = tf.convert_to_tensor(__lowerCamelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE = tf.expand_dims(__lowerCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"input_labels": input_labels} ) return encoding_image_processor def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = max([point.shape[0] for point in input_points] ) SCREAMING_SNAKE_CASE = [] for i, point in enumerate(__lowerCamelCase ): if point.shape[0] != expected_nb_points: SCREAMING_SNAKE_CASE = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) SCREAMING_SNAKE_CASE = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = processed_input_points return input_points, input_labels def _snake_case ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : np.ndarray , __lowerCamelCase : str , __lowerCamelCase : str=False ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = original_size SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor._get_preprocess_shape(__lowerCamelCase , longest_edge=__lowerCamelCase ) SCREAMING_SNAKE_CASE = deepcopy(__lowerCamelCase ).astype(__lowerCamelCase ) if is_bounding_box: SCREAMING_SNAKE_CASE = coords.reshape(-1 , 2 , 2 ) SCREAMING_SNAKE_CASE = coords[..., 0] * (new_w / old_w) SCREAMING_SNAKE_CASE = coords[..., 1] * (new_h / old_h) if is_bounding_box: SCREAMING_SNAKE_CASE = coords.reshape(-1 , 4 ) return coords def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Any=None , __lowerCamelCase : int=None , ): if input_points is not None: if hasattr(__lowerCamelCase , "numpy" ): # Checks for TF or Torch tensor SCREAMING_SNAKE_CASE = input_points.numpy().tolist() if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not isinstance(input_points[0] , __lowerCamelCase ): raise ValueError("Input points must be a list of list of floating points." 
) SCREAMING_SNAKE_CASE = [np.array(__lowerCamelCase ) for input_point in input_points] else: SCREAMING_SNAKE_CASE = None if input_labels is not None: if hasattr(__lowerCamelCase , "numpy" ): SCREAMING_SNAKE_CASE = input_labels.numpy().tolist() if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not isinstance(input_labels[0] , __lowerCamelCase ): raise ValueError("Input labels must be a list of list integers." ) SCREAMING_SNAKE_CASE = [np.array(__lowerCamelCase ) for label in input_labels] else: SCREAMING_SNAKE_CASE = None if input_boxes is not None: if hasattr(__lowerCamelCase , "numpy" ): SCREAMING_SNAKE_CASE = input_boxes.numpy().tolist() if ( not isinstance(__lowerCamelCase , __lowerCamelCase ) or not isinstance(input_boxes[0] , __lowerCamelCase ) or not isinstance(input_boxes[0][0] , __lowerCamelCase ) ): raise ValueError("Input boxes must be a list of list of list of floating points." ) SCREAMING_SNAKE_CASE = [np.array(__lowerCamelCase ).astype(np.floataa ) for box in input_boxes] else: SCREAMING_SNAKE_CASE = None return input_points, input_labels, input_boxes @property def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(__lowerCamelCase ) ) def _snake_case ( self : int , *__lowerCamelCase : List[Any] , **__lowerCamelCase : int ): return self.image_processor.post_process_masks(*__lowerCamelCase , **__lowerCamelCase )
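# A hedged usage sketch for the processor above; the checkpoint id and the
# single 2D point prompt are illustrative, and PyTorch is assumed for
# return_tensors="pt".
import numpy as np
from PIL import Image
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
inputs = processor(image, input_points=[[[320, 240]]], return_tensors="pt")
print(inputs["pixel_values"].shape)  # image resized to the model's longest edge
print(inputs["input_points"].shape)  # point coordinates rescaled the same way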
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
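# A short sketch instantiating the config above with its defaults; the printed
# values are the character-hashing hyperparameters defined in this file.
from transformers import CanineConfig

config = CanineConfig()
print(config.num_hash_functions, config.num_hash_buckets, config.downsampling_rate)  # 8 16384 4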
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit from the voltage
    and current magnitudes and their phase angles (in degrees).
    """
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
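# Worked check: 100 V and 5 A, both at a phase angle of 0 degrees, give a
# purely real apparent power of 500 VA.
print(apparent_power(100, 5, 0, 0))  # (500+0j)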
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Approximate the arc length of fnc between x_start and x_end by summing the
    lengths of straight-line segments between sampled points.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
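# Sanity check: for the straight line f(x) = x on [0, 1] the piecewise-linear
# approximation is exact, so any step count returns sqrt(2).
print(line_length(lambda x: x, 0, 1, 10))  # ~1.4142135623730951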
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __a ( A__ : List[str] ): SCREAMING_SNAKE_CASE = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ ) SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def __a ( A__ : Tuple , A__ : Union[str, Any]=None ): SCREAMING_SNAKE_CASE = {} for old_key in state_dict.keys(): SCREAMING_SNAKE_CASE = old_key if "moe_layer.experts." in key: if expert_idx is not None: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" ) else: SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: SCREAMING_SNAKE_CASE = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: SCREAMING_SNAKE_CASE = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: SCREAMING_SNAKE_CASE = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: SCREAMING_SNAKE_CASE = key.replace("final_layer_norm" , "ff_layer_norm" ) SCREAMING_SNAKE_CASE = state_dict[old_key] return new_dict def __a ( A__ : List[str] , A__ : List[Any] , A__ : str , A__ : Union[str, Any] , A__ : str = WEIGHTS_NAME ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 os.makedirs(A__ , exist_ok=A__ ) for expert in range(A__ ): SCREAMING_SNAKE_CASE = switch_checkpoint_path + F"-rank-{expert}.pt" if os.path.isfile(A__ ): SCREAMING_SNAKE_CASE = torch.load(A__ )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = os.path.join( A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) torch.save(A__ , A__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(A__ )[0]].dtype ) # Add the last block SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) ) SCREAMING_SNAKE_CASE = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(A__ ) SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ ) SCREAMING_SNAKE_CASE = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(A__ ) == 1: SCREAMING_SNAKE_CASE = os.path.join(A__ , A__ ) torch.save(A__ , A__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(A__ , A__ ) # Otherwise, let's build the index SCREAMING_SNAKE_CASE = {} for idx, shard in enumerate(A__ ): SCREAMING_SNAKE_CASE = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(A__ ):05d}.bin" ) SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , 
F"-{idx+1:05d}-of-???.bin" ) ) os.rename(A__ , os.path.join(A__ , A__ ) ) for key in shard: SCREAMING_SNAKE_CASE = shard_file # Add the metadata SCREAMING_SNAKE_CASE = {"total_size": total_size} SCREAMING_SNAKE_CASE = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE = json.dumps(A__ , indent=2 , sort_keys=A__ ) + "\n" f.write(A__ ) return metadata, index if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) __A : Optional[int] = parser.parse_args() __A , __A : Union[str, Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) __A : Any = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) __A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
def min_path_sum(grid: list) -> int:
    """
    Find the minimum-cost path from the top-left to the bottom-right of a
    grid, moving only right or down, and return its total cost.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # First row can only be reached from the left, so accumulate it directly
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
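# Worked example (3x3 case): the cheapest monotone path through
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 -> 3 -> 1 -> 1 -> 1, total 7.
print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7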
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = BlipImageProcessor() SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Dict , **__lowerCamelCase : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def _snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" ) SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = 
BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __A : List[Any] = logging.getLogger() def __a ( A__ : Path , A__ : list ): SCREAMING_SNAKE_CASE = "\n".join(A__ ) Path(A__ ).open("w" ).writelines(A__ ) __A : List[str] = 'patrickvonplaten/t5-tiny-random' __A : Any = 'sshleifer/bart-tiny-random' __A : int = 'sshleifer/tiny-mbart' __A : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def _snake_case ( self : Dict , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" SCREAMING_SNAKE_CASE = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() SCREAMING_SNAKE_CASE = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."] _dump_articles(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" ) SCREAMING_SNAKE_CASE = "translation_en_to_de" if model == T5_TINY else "summarization" SCREAMING_SNAKE_CASE = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): run_generate() assert Path(__lowerCamelCase ).exists() # os.remove(Path(output_file_name)) def _snake_case ( self : Any ): self.run_eval_tester(__lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def _snake_case ( self : List[str] , __lowerCamelCase : int ): self.run_eval_tester(__lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def _snake_case ( self : int , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source" SCREAMING_SNAKE_CASE = input_file_name.parent / "utest_output.txt" assert not output_file_name.exists() SCREAMING_SNAKE_CASE = { "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"], "de": [ "Maschinelles Lernen ist großartig, oder?", "Ich esse gerne Bananen", "Morgen ist wieder ein toller Tag!", ], } SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir() ) SCREAMING_SNAKE_CASE = str(tmp_dir / "scores.json" ) SCREAMING_SNAKE_CASE = str(tmp_dir / "val.target" ) _dump_articles(__lowerCamelCase , text["en"] ) _dump_articles(__lowerCamelCase , text["de"] ) SCREAMING_SNAKE_CASE = "translation_en_to_de" if model == T5_TINY else "summarization" SCREAMING_SNAKE_CASE = f"\n run_eval_search.py\n {model}\n {str(__lowerCamelCase )}\n {str(__lowerCamelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split() testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] ) with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): with CaptureStdout() as cs: run_search() SCREAMING_SNAKE_CASE = [" num_beams | length_penalty", model, "Best score args"] SCREAMING_SNAKE_CASE = ["Info"] if "translation" in task: expected_strings.append("bleu" ) else: expected_strings.extend(__lowerCamelCase ) for w in 
expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(__lowerCamelCase ).exists() os.remove(Path(__lowerCamelCase ) )
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ): pass def __a ( A__ : str ): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __A : Tuple = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) SCREAMING_SNAKE_CASE = "What is the placebo?" SCREAMING_SNAKE_CASE = [ { "image": load_image(__lowerCamelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 ) self.assertEqual( __lowerCamelCase , [ [ {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, {"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "How many cats are there?" SCREAMING_SNAKE_CASE = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) # We can optionnally pass directly the words and bounding boxes SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png" SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 ) self.assertEqual(__lowerCamelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase ) SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" 
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) SCREAMING_SNAKE_CASE = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) SCREAMING_SNAKE_CASE = INVOICE_URL SCREAMING_SNAKE_CASE = "What is the invoice number?" SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _snake_case ( self : List[Any] ): pass
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin __A : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __A : Optional[Any] = 2_5_0_0_0_4 __A : int = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MBartaaTokenizer lowerCamelCase__ = MBartaaTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def _snake_case ( self : Optional[int] ): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = "<s>" SCREAMING_SNAKE_CASE = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 1054 ) def _snake_case ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def _snake_case ( self : List[Any] ): # fmt: off SCREAMING_SNAKE_CASE = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , 
model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def _snake_case ( self : Tuple ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = "facebook/mbart-large-50-one-to-many-mmt" lowerCamelCase__ = [ " UN Chief Says There Is No Military Solution in Syria", " 
Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowerCamelCase__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowerCamelCase__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def _snake_case ( cls : List[str] ): SCREAMING_SNAKE_CASE = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) SCREAMING_SNAKE_CASE = 1 return cls def _snake_case ( self : Tuple ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) def _snake_case ( self : Optional[Any] ): self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids ) SCREAMING_SNAKE_CASE = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] SCREAMING_SNAKE_CASE = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , __lowerCamelCase ) SCREAMING_SNAKE_CASE = 10 SCREAMING_SNAKE_CASE = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0] self.assertEqual(ids[0] , __lowerCamelCase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) def _snake_case ( self : Any ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] ) def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE = MBartaaTokenizer.from_pretrained(__lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase ) @require_torch def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt" ) SCREAMING_SNAKE_CASE = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def _snake_case ( self : Union[str, 
Any] ): SCREAMING_SNAKE_CASE = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) SCREAMING_SNAKE_CASE = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt" ) SCREAMING_SNAKE_CASE = self.tokenizer( text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=10 , return_tensors="pt" ) SCREAMING_SNAKE_CASE = targets["input_ids"] SCREAMING_SNAKE_CASE = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _snake_case ( self : List[Any] ): SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(__lowerCamelCase ) , { # en_XX, A, test, EOS "input_ids": [[250004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
698
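# A minimal usage sketch (not part of the dataset row above) of the pattern the
# mBART-50 tokenizer tests assert: src/tgt language codes plus shift_tokens_right.
# Assumes the public checkpoint is reachable.
from transformers import MBart50Tokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    return_tensors="pt",
)
# mBART-50 decoders start from [eos, tgt_lang_code]; this is exactly what the
# decoder_input_ids assertions in the tests above check.
decoder_input_ids = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)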
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "facebook/bart-large-mnli" lowerCamelCase__ = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) lowerCamelCase__ = "text_classifier" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSequenceClassification lowerCamelCase__ = ["text", ["text"]] lowerCamelCase__ = ["text"] def _snake_case ( self : Optional[Any] ): super().setup() SCREAMING_SNAKE_CASE = self.model.config SCREAMING_SNAKE_CASE = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("entail" ): SCREAMING_SNAKE_CASE = int(__lowerCamelCase ) if self.entailment_id == -1: raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." ) def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = labels return self.pre_processor( [text] * len(__lowerCamelCase ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , ) def _snake_case ( self : str , __lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = outputs.logits SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
698
1
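# Hedged sketch: the text-classification tool above scores labels through NLI
# entailment; the stock zero-shot pipeline exposes the same behavior
# (checkpoint download assumed).
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The optimizer diverges unless the learning rate is warmed up.",
    candidate_labels=["machine learning", "cooking", "sports"],
)
print(result["labels"][0])  # highest-entailment label, like the tool's decode()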
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __A : int = logging.get_logger(__name__) def __a ( A__ : Union[tf.Tensor, np.ndarray] ): if isinstance(A__ , np.ndarray ): return list(tensor.shape ) SCREAMING_SNAKE_CASE = tf.shape(A__ ) if tensor.shape == tf.TensorShape(A__ ): return dynamic SCREAMING_SNAKE_CASE = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(A__ )] def __a ( A__ : tf.Tensor , A__ : Optional[int] = None , A__ : Optional[str] = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=A__ , name=A__ ) def __a ( A__ : str , A__ : Union[str, Any] , A__ : Any , A__ : Dict=1E-5 , A__ : Any=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(A__ , A__ ): raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." ) # Get mean and variance on the axis to be normalized SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tf.nn.moments(A__ , axes=[axis] , keepdims=A__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis SCREAMING_SNAKE_CASE = [1] * inputs.shape.rank SCREAMING_SNAKE_CASE = shape_list(A__ )[axis] SCREAMING_SNAKE_CASE = tf.reshape(A__ , A__ ) SCREAMING_SNAKE_CASE = tf.reshape(A__ , A__ ) # Compute layer normalization using the batch_normalization # function. SCREAMING_SNAKE_CASE = tf.nn.batch_normalization( A__ , A__ , A__ , offset=A__ , scale=A__ , variance_epsilon=A__ , ) return outputs def __a ( A__ : int , A__ : List[Any]=0 , A__ : Dict=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input SCREAMING_SNAKE_CASE = tf.shape(A__ ) SCREAMING_SNAKE_CASE = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) SCREAMING_SNAKE_CASE = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(A__ , A__ ) def __a ( A__ : tf.Tensor ): if not isinstance(A__ , tf.Tensor ): SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: SCREAMING_SNAKE_CASE = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: SCREAMING_SNAKE_CASE = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) SCREAMING_SNAKE_CASE = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __a ( A__ : tf.Tensor , A__ : int , A__ : str = "input_ids" ): tf.debugging.assert_less( A__ , tf.cast(A__ , dtype=tensor.dtype ) , message=( F"The maximum value of {tensor_name} ({tf.math.reduce_max(A__ )}) must be smaller than the embedding " F"layer's input dimension ({embed_dim}). 
The likely cause is some problem at tokenization time." ) , ) def __a ( A__ : int , A__ : Optional[Any] , A__ : Optional[int] ): SCREAMING_SNAKE_CASE = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. SCREAMING_SNAKE_CASE = [x for x in data if len(A__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " F"bytes: {bad_attributes}" ) SCREAMING_SNAKE_CASE = np.asarray(A__ ) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = np.array_split(A__ , A__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 SCREAMING_SNAKE_CASE = np.array_split(A__ , A__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(A__ ): SCREAMING_SNAKE_CASE = chunk_data else: SCREAMING_SNAKE_CASE = data def __a ( A__ : Tuple , A__ : Tuple ): if name in group.attrs: SCREAMING_SNAKE_CASE = [n.decode("utf8" ) if hasattr(A__ , "decode" ) else n for n in group.attrs[name]] else: SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8" ) if hasattr(A__ , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def __a ( A__ : List[str] ): def _expand_single_ad_tensor(A__ : List[str] ): if isinstance(A__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(A__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , A__ )
698
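# Illustrative sketch of the shape_list idiom defined above: keep static dims
# where the graph knows them, fall back to dynamic tensors where it does not.
# The function name here is local to this example.
import tensorflow as tf


@tf.function(input_signature=[tf.TensorSpec([None, None, 8], tf.float32)])
def flatten_tokens(x):
    static = x.shape.as_list()  # [None, None, 8] inside the traced graph
    dynamic = tf.shape(x)  # resolved at call time
    batch, seq, hidden = [dynamic[i] if s is None else s for i, s in enumerate(static)]
    return tf.reshape(x, (batch * seq, hidden))


print(flatten_tokens(tf.zeros((2, 3, 8))).shape)  # (6, 8)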
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __a ( A__ : str=None ): if subparsers is not None: SCREAMING_SNAKE_CASE = subparsers.add_parser("test" ) else: SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=A__ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def __a ( A__ : Tuple ): SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: SCREAMING_SNAKE_CASE = script_name else: SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}" SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split() SCREAMING_SNAKE_CASE = execute_subprocess_async(A__ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def __a ( ): SCREAMING_SNAKE_CASE = test_command_parser() SCREAMING_SNAKE_CASE = parser.parse_args() test_command(A__ ) if __name__ == "__main__": main()
698
1
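# Hedged sketch of what the `accelerate test` entry point above reduces to: a
# single accelerate-launch subprocess. The script path here is hypothetical;
# the real one is resolved from the installed package as shown above.
import subprocess

cmd = ["accelerate-launch", "/path/to/accelerate/test_utils/scripts/test_script.py"]
completed = subprocess.run(cmd)
if completed.returncode == 0:
    print("Test is a success! You are ready for your distributed training!")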
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Optional[Any] = logging.get_logger(__name__) __A : Tuple = {'vocab_file': 'spiece.model'} __A : Union[str, Any] = { 'vocab_file': { 'bert_for_seq_generation': ( 'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model' ), } } __A : str = {'bert_for_seq_generation': 5_1_2} class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = [] lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : Union[str, Any]="</s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<::::>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , sep_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def _snake_case ( self : Union[str, Any] ): return self.sp_model.get_piece_size() def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : Tuple , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ): return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def _snake_case ( self : str , __lowerCamelCase : Optional[int] ): return self.sp_model.piece_to_id(__lowerCamelCase ) def _snake_case ( self : List[Any] , __lowerCamelCase : int ): SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(__lowerCamelCase ) return token def _snake_case ( self : Tuple , __lowerCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if 
filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
698
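# Round-trip sketch for the SentencePiece-backed tokenizer above, assuming the
# Google checkpoint from the PRETRAINED map is downloadable.
from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
ids = tok("SentencePiece round trip.").input_ids
print(tok.decode(ids))
tok.save_pretrained("./bert_gen_tok")  # writes spiece.model via save_vocabulary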
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Optional[int] = logging.get_logger(__name__) __A : List[str] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : Tuple = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __A : Dict = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __a ( ): SCREAMING_SNAKE_CASE = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) SCREAMING_SNAKE_CASE = bs[:] SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(A__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs] return dict(zip(A__ , A__ ) ) def __a ( A__ : Optional[int] ): SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE = char return pairs class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ): SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE = bytes_to_unicode() SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1] SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : Dict ): return len(self.encoder ) def _snake_case ( self : int ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ): if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while i < len(__lowerCamelCase ): try: SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase ) SCREAMING_SNAKE_CASE = new_word if len(__lowerCamelCase ) == 1: break else: SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = word return word def _snake_case ( self : str , __lowerCamelCase : Dict ): SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __lowerCamelCase ): SCREAMING_SNAKE_CASE = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : List[str] , __lowerCamelCase : str ): return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ): return 
self.decoder.get(__lowerCamelCase ) def _snake_case ( self : Tuple , __lowerCamelCase : str ): SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) SCREAMING_SNAKE_CASE = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) SCREAMING_SNAKE_CASE = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ): SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE = " " + text return (text, kwargs) def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ): SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowerCamelCase ) SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase ) if len(__lowerCamelCase ) > self.model_max_length: SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
698
1
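# The byte-level BPE helper above is easy to exercise standalone; this mirrors
# get_pairs exactly as defined in the blenderbot tokenizer file.
def get_pairs(word):
    # Enumerate adjacent symbol pairs -- the merge candidates ranked by bpe_ranks.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


print(sorted(get_pairs(tuple("hello"))))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]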
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : Any = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "owlvit_text_model" def __init__( self : Any , __lowerCamelCase : Dict=49408 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Optional[int]=2048 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Optional[Any]=8 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : List[Any]="quick_gelu" , __lowerCamelCase : Any=1e-5 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Tuple=1.0 , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=49406 , __lowerCamelCase : Tuple=49407 , **__lowerCamelCase : Optional[int] , ): super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = initializer_factor @classmethod def _snake_case ( cls : List[str] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[int] ): cls._set_token_in_kwargs(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": SCREAMING_SNAKE_CASE = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "owlvit_vision_model" def __init__( self : int , __lowerCamelCase : List[str]=768 , __lowerCamelCase : List[Any]=3072 , __lowerCamelCase : str=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Optional[int]="quick_gelu" , __lowerCamelCase : Dict=1e-5 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Union[str, Any]=1.0 , **__lowerCamelCase : List[str] , ): super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = initializer_factor @classmethod def _snake_case ( cls : List[str] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : List[str] ): cls._set_token_in_kwargs(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": SCREAMING_SNAKE_CASE = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = "owlvit" lowerCamelCase__ = True def __init__( self : Optional[int] , __lowerCamelCase : Any=None , __lowerCamelCase : int=None , __lowerCamelCase : List[str]=512 , __lowerCamelCase : str=2.6_592 , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : str , ): super().__init__(**__lowerCamelCase ) if text_config is None: SCREAMING_SNAKE_CASE = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: SCREAMING_SNAKE_CASE = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." ) SCREAMING_SNAKE_CASE = OwlViTTextConfig(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = OwlViTVisionConfig(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = projection_dim SCREAMING_SNAKE_CASE = logit_scale_init_value SCREAMING_SNAKE_CASE = return_dict SCREAMING_SNAKE_CASE = 1.0 @classmethod def _snake_case ( cls : Optional[Any] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : int ): cls._set_token_in_kwargs(__lowerCamelCase ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) @classmethod def _snake_case ( cls : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , **__lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = text_config SCREAMING_SNAKE_CASE = vision_config return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.text_config.to_dict() SCREAMING_SNAKE_CASE = self.vision_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' @property def _snake_case ( self : Any ): return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def _snake_case ( self : Tuple ): return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def _snake_case ( self : str ): return 1e-4 def _snake_case ( self : Optional[Any] , __lowerCamelCase : "ProcessorMixin" , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : Optional["TensorType"] = None , ): SCREAMING_SNAKE_CASE = super().generate_dummy_inputs( processor.tokenizer , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , framework=__lowerCamelCase ) SCREAMING_SNAKE_CASE = super().generate_dummy_inputs( processor.image_processor , batch_size=__lowerCamelCase , framework=__lowerCamelCase ) return {**text_input_dict, **image_input_dict} @property def _snake_case ( self : List[Any] ): return 14
698
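# Sketch of the composition path above: build an OwlViTConfig from explicit
# sub-config dicts instead of a checkpoint (from_text_vision_configs takes
# plain dicts, as its signature above shows).
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_cfg = OwlViTTextConfig(hidden_size=512, num_hidden_layers=12)
vision_cfg = OwlViTVisionConfig(hidden_size=768, patch_size=32)
cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
print(cfg.projection_dim)  # 512, the default from __init__ above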
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # Drop the imaginary part when a root is purely real.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
698
1
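# Quick check of the complex branch of quadratic_roots above: x^2 + 1 = 0 has
# no real solutions, so both roots keep their imaginary parts.
print(quadratic_roots(a=1, b=0, c=1))  # (1j, -1j)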
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
698
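# Hedged mini-implementation of the _LazyModule idea the __init__ above relies
# on: attribute access triggers the real import once, then caches it. Names
# here are illustrative, not the transformers internals.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"modeling_beit": ["BeitModel", ...], ...}
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module(
            "." + self._attr_to_submodule[attr], self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value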
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : List[str] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase ) SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE = cs.out[:-1] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : int ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 ) SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCamelCase ): SCREAMING_SNAKE_CASE = "" for new_text in streamer: streamer_text += new_text
698
1
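# The iterator-streamer test above boils down to this pattern: generate on a
# worker thread, consume decoded text on the main thread (tiny test checkpoint
# assumed reachable).
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok)
kwargs = {**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
Thread(target=model.generate, kwargs=kwargs).start()
text = "".join(streamer)  # iterates until generation signals the end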
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __A : List[str] = logging.get_logger(__name__) __A : str = {'vocab_file': 'vocab.txt'} __A : Tuple = { 'vocab_file': { 'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt', 'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt', }, } __A : Optional[Any] = { 'facebook/esm2_t6_8M_UR50D': 1_0_2_4, 'facebook/esm2_t12_35M_UR50D': 1_0_2_4, } def __a ( A__ : Union[str, Any] ): with open(A__ , "r" ) as f: SCREAMING_SNAKE_CASE = f.read().splitlines() return [l.strip() for l in lines] class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[Any]="<cls>" , __lowerCamelCase : List[Any]="<pad>" , __lowerCamelCase : Optional[Any]="<mask>" , __lowerCamelCase : Optional[int]="<eos>" , **__lowerCamelCase : int , ): super().__init__(**__lowerCamelCase ) SCREAMING_SNAKE_CASE = load_vocab_file(__lowerCamelCase ) SCREAMING_SNAKE_CASE = dict(enumerate(self.all_tokens ) ) SCREAMING_SNAKE_CASE = {tok: ind for ind, tok in enumerate(self.all_tokens )} SCREAMING_SNAKE_CASE = unk_token SCREAMING_SNAKE_CASE = cls_token SCREAMING_SNAKE_CASE = pad_token SCREAMING_SNAKE_CASE = mask_token SCREAMING_SNAKE_CASE = eos_token SCREAMING_SNAKE_CASE = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def _snake_case ( self : Optional[int] , __lowerCamelCase : int ): return self._id_to_token.get(__lowerCamelCase , self.unk_token ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str ): return self._token_to_id.get(__lowerCamelCase , self._token_to_id.get(self.unk_token ) ) def _snake_case ( self : Optional[int] , __lowerCamelCase : str , **__lowerCamelCase : Tuple ): return text.split() def _snake_case ( self : int , __lowerCamelCase : int=False ): return len(self._id_to_token ) def _snake_case ( self : List[Any] ): return {token: i for i, token in enumerate(self.all_tokens )} def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str ): return self._token_to_id.get(__lowerCamelCase , self._token_to_id.get(self.unk_token ) ) def _snake_case ( self : List[str] , __lowerCamelCase : int ): return self._id_to_token.get(__lowerCamelCase , self.unk_token ) def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE = [self.cls_token_id] SCREAMING_SNAKE_CASE = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" 
) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def _snake_case ( self : int , __lowerCamelCase : List , __lowerCamelCase : Optional[List] = None , __lowerCamelCase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] SCREAMING_SNAKE_CASE = [1] + ([0] * len(__lowerCamelCase )) + [1] if token_ids_a is not None: mask += [0] * len(__lowerCamelCase ) + [1] return mask def _snake_case ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" ) with open(__lowerCamelCase , "w" ) as f: f.write("\n".join(self.all_tokens ) ) return (vocab_file,) @property def _snake_case ( self : Tuple ): return self.get_vocab_size(with_added_tokens=__lowerCamelCase ) def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Union[List[str], List[AddedToken]] , __lowerCamelCase : bool = False ): return super()._add_tokens(__lowerCamelCase , special_tokens=__lowerCamelCase )
698
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
698
1
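# Sketch of the special-token layout the ESM tokenizer above enforces, using
# the public class (checkpoint download assumed).
from transformers import EsmTokenizer

tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
ids = tok.build_inputs_with_special_tokens(tok.convert_tokens_to_ids(list("MKT")))
print(tok.convert_ids_to_tokens(ids))  # ['<cls>', 'M', 'K', 'T', '<eos>']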
import unittest

import numpy as np

from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors and matching all-zero segmentation maps
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With reduce_labels enabled, background (0) becomes the ignore index 255
        # and the remaining labels are shifted down by one
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
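# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the test suite above): how
# `do_reduce_labels` behaves in practice. Background class 0 is remapped to
# the ignore index 255 and every other label is shifted down by one. The
# random arrays below are hypothetical stand-ins for a real image and an
# ADE20k-style segmentation map; this assumes `transformers` and `torch`
# are installed.
import numpy as np

from transformers import BeitImageProcessor

image = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # dummy RGB image
segmentation_map = np.random.randint(0, 151, (32, 32), dtype=np.uint8)  # labels 0..150

processor = BeitImageProcessor(do_reduce_labels=True)
encoding = processor(image, segmentation_map, return_tensors="pt")

# After reduction: former background pixels are 255, former classes 1..150 are 0..149
print(encoding["labels"].unique())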
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Load the iris dataset and split it into train/test partitions
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` with the k-nearest-neighbours algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
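# ---------------------------------------------------------------------------
# Illustrative extension (a sketch, not part of the original script): measure
# the accuracy of `classifier` on the held-out split from train_test_split.
# `evaluate` is a hypothetical helper introduced here; it reuses the
# module-level names X_train, y_train, X_test, y_test and classes.
def evaluate(k=5):
    predictions = [classifier(X_train, y_train, classes, point, k) for point in X_test]
    truths = [classes[label] for label in y_test]
    # Fraction of test points whose predicted class name matches the truth
    return sum(p == t for p, t in zip(predictions, truths)) / len(truths)


if __name__ == "__main__":
    print(f"test accuracy (k=5): {evaluate():.3f}")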