Dataset schema (for string columns, min/max are string lengths; for int columns, min/max values):

column                    type     min    max
code                      string   82     53.2k
code_codestyle            int64    0      721
style_context             string   91     41.9k
style_context_codestyle   int64    0      699
label                     int64    0      1
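Each row below appears as five consecutive values: a code string, its code_codestyle id, a style_context string, its style_context_codestyle id, and a label. As a minimal sketch of inspecting such a dump with the Hugging Face datasets library — the dataset id below is a placeholder (the real repository name is not given in this dump), and the label comment is an observation from the rows shown here, where matching style ids always co-occur with label 1:

# Minimal sketch, assuming this dump corresponds to a Hugging Face dataset.
# "user/code-style-pairs" is a placeholder id, not the actual repository name.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:80])                # flattened Python source, 82 to 53.2k chars
print(row["code_codestyle"])           # style id in [0, 721]
print(row["style_context"][:80])       # paired source sample, 91 to 41.9k chars
print(row["style_context_codestyle"])  # style id in [0, 699]
print(row["label"])                    # appears to be 1 when the two style ids match (all rows shown)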
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : Tuple=13 , snake_case__ : Tuple=30 , snake_case__ : Optional[Any]=2 , snake_case__ : Optional[Any]=3 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=True , snake_case__ : Union[str, Any]=32 , snake_case__ : List[Any]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Dict=37 , snake_case__ : List[str]="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Optional[Any]=10 , snake_case__ : List[Any]=0.02 , snake_case__ : Tuple=3 , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : Dict = image_size UpperCAmelCase__ : Dict = patch_size UpperCAmelCase__ : Dict = num_channels UpperCAmelCase__ : Dict = is_training UpperCAmelCase__ : List[Any] = use_labels UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : Union[str, Any] = intermediate_size UpperCAmelCase__ : Optional[int] = hidden_act UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Tuple = type_sequence_label_size UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : Optional[Any] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : Optional[int] = (image_size // patch_size) ** 2 UpperCAmelCase__ : List[str] = num_patches + 1 def UpperCamelCase ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : str = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self : List[str] ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , ) def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str ): '''simple docstring''' UpperCAmelCase__ : str = TFViTModel(config=snake_case__ ) UpperCAmelCase__ : str = model(snake_case__ , training=snake_case__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase__ : Union[str, Any] = self.image_size // 2 UpperCAmelCase__ : List[Any] = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase__ : Optional[Any] = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ ) UpperCAmelCase__ : List[Any] = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.type_sequence_label_size UpperCAmelCase__ : List[Any] = TFViTForImageClassification(snake_case__ ) UpperCAmelCase__ : List[str] = model(snake_case__ , labels=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. UpperCAmelCase__ : Dict = self.image_size // 2 UpperCAmelCase__ : List[Any] = pixel_values[:, :, :image_size, :image_size] UpperCAmelCase__ : Tuple = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Tuple = 1 UpperCAmelCase__ : Optional[int] = TFViTForImageClassification(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs UpperCAmelCase__ : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( A , A , unittest.TestCase ): '''simple docstring''' lowercase_ : Optional[Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowercase_ : Tuple = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) lowercase_ : List[Any] = False lowercase_ : Optional[int] = False lowercase_ : Any = False def UpperCamelCase ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = TFViTModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def UpperCamelCase ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def UpperCamelCase ( self : Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="ViT does not use inputs_embeds" ) def UpperCamelCase ( self : List[Any] ): '''simple docstring''' pass def UpperCamelCase ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase__ : Tuple 
= model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Layer ) ) def UpperCamelCase ( self : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Any = model_class(snake_case__ ) UpperCAmelCase__ : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase__ : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def UpperCamelCase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCamelCase ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def UpperCamelCase ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = TFViTModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(snake_case__ ) def snake_case_ ( ): UpperCAmelCase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase ( self : Optional[Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def UpperCamelCase ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase__ : Optional[int] = self.default_image_processor UpperCAmelCase__ : Tuple = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="tf" ) # forward pass UpperCAmelCase__ : List[Any] = model(**snake_case__ ) # verify the logits UpperCAmelCase__ : Any = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , snake_case__ ) UpperCAmelCase__ : List[Any] = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
199
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy import structure: module name -> list of public symbols.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
199
1
'''simple docstring'''
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def count_trainable_parameters(model):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        '''simple docstring'''
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
280
'''simple docstring'''
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    '''simple docstring'''
    # Scrape the IMDb Top 250 chart and map each movie title to its rating.
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text) for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    '''simple docstring'''
    # Write the scraped chart as a two-column CSV file.
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
280
1
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase="pt" ): """simple docstring""" lowercase_ : Union[str, Any] = {"add_prefix_space": True} if isinstance(_UpperCamelCase , _UpperCamelCase ) and not line.startswith(" " ) else {} lowercase_ : Optional[int] = padding_side return tokenizer( [line] , max_length=_UpperCamelCase , padding="max_length" if pad_to_max_length else None , truncation=_UpperCamelCase , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase , **_UpperCamelCase , ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , ): """simple docstring""" lowercase_ : Optional[Any] = input_ids.ne(_UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class _UpperCAmelCase ( snake_case ): def __init__( self : Any , a : Optional[Any] , a : Any , a : Optional[int] , a : Optional[Any] , a : Dict="train" , a : List[Any]=None , a : Tuple=None , a : Optional[int]=None , a : Optional[int]="" , ): '''simple docstring''' super().__init__() lowercase_ : str = Path(a ).joinpath(type_path + ".source" ) lowercase_ : Union[str, Any] = Path(a ).joinpath(type_path + ".target" ) lowercase_ : Optional[int] = self.get_char_lens(self.src_file ) lowercase_ : Union[str, Any] = max_source_length lowercase_ : List[str] = max_target_length assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}""" lowercase_ : List[Any] = tokenizer lowercase_ : List[str] = prefix if n_obs is not None: lowercase_ : List[str] = self.src_lens[:n_obs] lowercase_ : List[Any] = src_lang lowercase_ : Tuple = tgt_lang def __len__( self : Dict ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : str , a : Union[str, Any] ): '''simple docstring''' lowercase_ : Optional[Any] = index + 1 # linecache starts at 1 lowercase_ : Tuple = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip("\n" ) lowercase_ : Optional[int] = linecache.getline(str(self.tgt_file ) , a ).rstrip("\n" ) assert source_line, f"""empty source line for index {index}""" assert tgt_line, f"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , a ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right lowercase_ : str = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer ) lowercase_ : str = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer lowercase_ : Union[str, Any] = encode_line(a , a , self.max_source_length , "right" ) lowercase_ : Tuple = encode_line(a , a , self.max_target_length , "right" ) lowercase_ : int = source_inputs["input_ids"].squeeze() lowercase_ : Optional[int] = target_inputs["input_ids"].squeeze() lowercase_ : Optional[int] = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def 
lowerCAmelCase__ ( a : Dict ): '''simple docstring''' return [len(a ) for x in Path(a ).open().readlines()] def lowerCAmelCase__ ( self : str , a : Any ): '''simple docstring''' lowercase_ : Dict = torch.stack([x["input_ids"] for x in batch] ) lowercase_ : Any = torch.stack([x["attention_mask"] for x in batch] ) lowercase_ : Optional[int] = torch.stack([x["decoder_input_ids"] for x in batch] ) lowercase_ : Union[str, Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , a ) else self.tokenizer.pad_token_id ) lowercase_ : Any = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , a ) else self.tokenizer.pad_token_id ) lowercase_ : Optional[int] = trim_batch(a , a ) lowercase_ , lowercase_ : Any = trim_batch(a , a , attention_mask=a ) lowercase_ : str = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch UpperCamelCase__ = getLogger(__name__) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" return list(itertools.chain.from_iterable(_UpperCamelCase ) ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" lowercase_ : List[str] = get_git_info() save_json(_UpperCamelCase , os.path.join(_UpperCamelCase , "git_log.json" ) ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=4 , **_UpperCamelCase ): """simple docstring""" with open(_UpperCamelCase , "w" ) as f: json.dump(_UpperCamelCase , _UpperCamelCase , indent=_UpperCamelCase , **_UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" with open(_UpperCamelCase ) as f: return json.load(_UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowercase_ : Tuple = git.Repo(search_parent_directories=_UpperCamelCase ) lowercase_ : Optional[Any] = { "repo_id": str(_UpperCamelCase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" return list(map(_UpperCamelCase , _UpperCamelCase ) ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" with open(_UpperCamelCase , "wb" ) as f: return pickle.dump(_UpperCamelCase , _UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" def remove_articles(_UpperCamelCase ): return re.sub(R"\b(a|an|the)\b" , " " , _UpperCamelCase ) def white_space_fix(_UpperCamelCase ): return " ".join(text.split() ) def remove_punc(_UpperCamelCase ): lowercase_ : Optional[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_UpperCamelCase ) ) ) ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ : Any = normalize_answer(_UpperCamelCase ).split() lowercase_ : Tuple = normalize_answer(_UpperCamelCase ).split() lowercase_ : List[str] = Counter(_UpperCamelCase ) & Counter(_UpperCamelCase ) lowercase_ : Optional[int] = sum(common.values() ) if num_same == 0: return 0 lowercase_ : int = 1.0 * num_same / len(_UpperCamelCase ) lowercase_ : Union[str, Any] = 1.0 * num_same / len(_UpperCamelCase ) lowercase_ : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" return 
normalize_answer(_UpperCamelCase ) == normalize_answer(_UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" assert len(_UpperCamelCase ) == len(_UpperCamelCase ) lowercase_ : Dict = 0 for hypo, pred in zip(_UpperCamelCase , _UpperCamelCase ): em += exact_match_score(_UpperCamelCase , _UpperCamelCase ) if len(_UpperCamelCase ) > 0: em /= len(_UpperCamelCase ) return {"em": em} def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" return model_prefix.startswith("rag" ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ : str = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead lowercase_ : List[Any] = "dropout_rate" for p in extra_params: if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if not hasattr(_UpperCamelCase , _UpperCamelCase ) and not hasattr(_UpperCamelCase , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(_UpperCamelCase ) ) delattr(_UpperCamelCase , _UpperCamelCase ) continue lowercase_ : Any = p if hasattr(_UpperCamelCase , _UpperCamelCase ) else equivalent_param[p] setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) ) delattr(_UpperCamelCase , _UpperCamelCase ) return hparams, config
620
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ : Optional[Any] = WavaVecaForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) lowercase_ : Optional[int] = downstream_dict["projector.weight"] lowercase_ : str = downstream_dict["projector.bias"] lowercase_ : int = downstream_dict["model.post_net.linear.weight"] lowercase_ : Optional[Any] = downstream_dict["model.post_net.linear.bias"] return model def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ : Tuple = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) lowercase_ : Any = downstream_dict["model.linear.weight"] lowercase_ : List[str] = downstream_dict["model.linear.bias"] return model def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ : Any = WavaVecaForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) lowercase_ : str = downstream_dict["connector.weight"] lowercase_ : List[str] = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowercase_ : Union[str, Any] = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] lowercase_ : Dict = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] lowercase_ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] lowercase_ : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] lowercase_ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] lowercase_ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] lowercase_ : Optional[Any] = downstream_dict["objective.W"] return model @torch.no_grad() def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ : Tuple = torch.load(_UpperCamelCase , map_location="cpu" ) lowercase_ : Dict = checkpoint["Downstream"] lowercase_ : Optional[Any] = WavaVecaConfig.from_pretrained(_UpperCamelCase ) lowercase_ : Dict = WavaVecaFeatureExtractor.from_pretrained( _UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase ) lowercase_ : Dict = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): lowercase_ : Any = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) elif arch.endswith("ForAudioFrameClassification" ): lowercase_ : Optional[int] = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) elif arch.endswith("ForXVector" ): lowercase_ : List[Any] = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: lowercase_ : List[str] = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_UpperCamelCase ) hf_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument( 
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') UpperCamelCase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
620
1
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["onnx"])
522
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
522
1
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
658
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , ) -> Optional[Any]: UpperCamelCase :int = parent UpperCamelCase :List[Any] = batch_size UpperCamelCase :List[Any] = patch_size UpperCamelCase :Optional[int] = max_length UpperCamelCase :Union[str, Any] = num_mel_bins UpperCamelCase :Optional[int] = is_training UpperCamelCase :Dict = use_labels UpperCamelCase :Dict = hidden_size UpperCamelCase :Optional[int] = num_hidden_layers UpperCamelCase :str = num_attention_heads UpperCamelCase :Optional[int] = intermediate_size UpperCamelCase :List[str] = hidden_act UpperCamelCase :List[str] = hidden_dropout_prob UpperCamelCase :List[Any] = attention_probs_dropout_prob UpperCamelCase :str = type_sequence_label_size UpperCamelCase :List[Any] = initializer_range UpperCamelCase :Union[str, Any] = scope UpperCamelCase :List[Any] = frequency_stride UpperCamelCase :Tuple = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCamelCase :List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCamelCase :List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCamelCase :Tuple = frequency_out_dimension * time_out_dimension UpperCamelCase :Optional[int] = num_patches + 2 def UpperCAmelCase ( self ) -> Any: UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCamelCase :Tuple = None if self.use_labels: UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :str = self.get_config() return config, input_values, labels def UpperCAmelCase ( self ) -> List[Any]: return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , 
initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCamelCase :Optional[Any] = ASTModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Union[str, Any] = config_and_inputs UpperCamelCase :List[Any] = {'''input_values''': input_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] =( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) UpperCamelCase_ : Any =( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) UpperCamelCase_ : Optional[int] =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Optional[Any] =False UpperCamelCase_ : Dict =False def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self ) -> Dict: UpperCamelCase :List[Any] = ASTModelTester(self ) UpperCamelCase :Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''' ) def UpperCAmelCase ( self ) -> str: pass def UpperCAmelCase ( self ) -> int: UpperCamelCase , UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase :Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def UpperCAmelCase ( self ) -> Tuple: UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase :Any = [*signature.parameters.keys()] UpperCamelCase :Optional[int] = ['''input_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase ( self ) -> Optional[int]: for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Union[str, Any] = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _A ( ): UpperCamelCase :Any = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , 
filename='''sample_audio.flac''' , repo_type='''dataset''' ) UpperCamelCase , UpperCamelCase :Any = torchaudio.load(SCREAMING_SNAKE_CASE__ ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase ( self ) -> Tuple: return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ) if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self ) -> str: UpperCamelCase :Union[str, Any] = self.default_feature_extractor UpperCamelCase :Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase :str = self.default_feature_extractor UpperCamelCase , UpperCamelCase :Dict = prepare_audio() UpperCamelCase :Dict = audio.squeeze().numpy() UpperCamelCase :int = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase :List[Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase :int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
658
1
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class a__ ( _snake_case ): """simple docstring""" A__ : Dict = (UniPCMultistepScheduler,) A__ : List[Any] = (('''num_inference_steps''', 25),) def __UpperCAmelCase ( self :str , **lowercase__ :str ): lowercase = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'solver_type': 'bh2', } config.update(**lowercase__ ) return config def __UpperCAmelCase ( self :int , lowercase__ :Union[str, Any]=0 , **lowercase__ :Dict ): lowercase = dict(self.forward_default_kwargs ) lowercase = kwargs.pop('num_inference_steps' , lowercase__ ) lowercase = self.dummy_sample lowercase = 0.1 * sample lowercase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase = self.get_scheduler_config(**lowercase__ ) lowercase = scheduler_class(**lowercase__ ) scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals lowercase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase__ ) lowercase = scheduler_class.from_pretrained(lowercase__ ) new_scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals lowercase = dummy_past_residuals[: new_scheduler.config.solver_order] lowercase , lowercase = sample, sample for t in range(lowercase__ , time_step + scheduler.config.solver_order + 1 ): lowercase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample lowercase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self :List[Any] , lowercase__ :Any=0 , **lowercase__ :Optional[Any] ): lowercase = dict(self.forward_default_kwargs ) lowercase = kwargs.pop('num_inference_steps' , lowercase__ ) lowercase = self.dummy_sample lowercase = 0.1 * sample lowercase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase = self.get_scheduler_config() lowercase = scheduler_class(**lowercase__ ) scheduler.set_timesteps(lowercase__ ) # copy over dummy past residuals (must be after setting timesteps) lowercase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase__ ) lowercase = scheduler_class.from_pretrained(lowercase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase__ ) # copy over dummy past residual (must be after setting timesteps) lowercase = dummy_past_residuals[: new_scheduler.config.solver_order] lowercase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample lowercase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self :Tuple , lowercase__ :Any=None , **lowercase__ :Dict ): if scheduler is None: lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config(**lowercase__ ) lowercase = scheduler_class(**lowercase__ ) lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config(**lowercase__ ) lowercase = 
scheduler_class(**lowercase__ ) lowercase = 10 lowercase = self.dummy_model() lowercase = self.dummy_sample_deter scheduler.set_timesteps(lowercase__ ) for i, t in enumerate(scheduler.timesteps ): lowercase = model(lowercase__ , lowercase__ ) lowercase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample return sample def __UpperCAmelCase ( self :List[str] ): lowercase = dict(self.forward_default_kwargs ) lowercase = kwargs.pop('num_inference_steps' , lowercase__ ) for scheduler_class in self.scheduler_classes: lowercase = self.get_scheduler_config() lowercase = scheduler_class(**lowercase__ ) lowercase = self.dummy_sample lowercase = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase__ , 'set_timesteps' ): scheduler.set_timesteps(lowercase__ ) elif num_inference_steps is not None and not hasattr(lowercase__ , 'set_timesteps' ): lowercase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase = [residual + 0.2, residual + 0.15, residual + 0.10] lowercase = dummy_past_residuals[: scheduler.config.solver_order] lowercase = scheduler.timesteps[5] lowercase = scheduler.timesteps[6] lowercase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample lowercase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __UpperCAmelCase ( self :int ): # make sure that iterating over schedulers with same config names gives same results # for defaults lowercase = UniPCMultistepScheduler(**self.get_scheduler_config() ) lowercase = self.full_loop(scheduler=lowercase__ ) lowercase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_mean.item() - 0.2464 ) < 1E-3 lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowercase = DEISMultistepScheduler.from_config(scheduler.config ) lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowercase = UniPCMultistepScheduler.from_config(scheduler.config ) lowercase = self.full_loop(scheduler=lowercase__ ) lowercase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_mean.item() - 0.2464 ) < 1E-3 def __UpperCAmelCase ( self :Union[str, Any] ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowercase__ ) def __UpperCAmelCase ( self :Optional[int] ): self.check_over_configs(thresholding=lowercase__ ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase__ , prediction_type=lowercase__ , sample_max_value=lowercase__ , solver_order=lowercase__ , solver_type=lowercase__ , ) def __UpperCAmelCase ( self :int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase__ ) def __UpperCAmelCase ( self :Optional[Any] ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , ) lowercase = self.full_loop( solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , ) assert not torch.isnan(lowercase__ ).any(), "Samples have nan numbers" def __UpperCAmelCase ( self :Optional[int] ): self.check_over_configs(lower_order_final=lowercase__ ) self.check_over_configs(lower_order_final=lowercase__ ) def 
__UpperCAmelCase ( self :Any ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowercase__ , time_step=0 ) def __UpperCAmelCase ( self :List[str] ): lowercase = self.full_loop() lowercase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_mean.item() - 0.2464 ) < 1E-3 def __UpperCAmelCase ( self :Union[str, Any] ): lowercase = self.full_loop(prediction_type='v_prediction' ) lowercase = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_mean.item() - 0.1014 ) < 1E-3 def __UpperCAmelCase ( self :int ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config(thresholding=lowercase__ , dynamic_thresholding_ratio=0 ) lowercase = scheduler_class(**lowercase__ ) lowercase = 10 lowercase = self.dummy_model() lowercase = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase__ ) for i, t in enumerate(scheduler.timesteps ): lowercase = model(lowercase__ , lowercase__ ) lowercase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample assert sample.dtype == torch.floataa def __UpperCAmelCase ( self :List[Any] , **lowercase__ :str ): for scheduler_class in self.scheduler_classes: lowercase = self.get_scheduler_config(**lowercase__ ) lowercase = scheduler_class(**lowercase__ ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
314
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping before handing off to the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
314
1
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """simple docstring"""
    # True when n's digits are exactly 1-9, each appearing once.
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """simple docstring"""
    # Concatenated products: a 4-digit base b joined with 2*b (5 digits) equals
    # 100002 * b; a 3-digit base joined with 2*b and 3*b (3 digits each) equals
    # 1002003 * b. Search from the largest base downward.
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
68
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A = logging.get_logger(__name__) __A = {"vocab_file": "spiece.model"} __A = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } __A = { "AI-Sweden/gpt-sw3-126m": 20_48, "AI-Sweden/gpt-sw3-350m": 20_48, "AI-Sweden/gpt-sw3-1.6b": 20_48, "AI-Sweden/gpt-sw3-6.7b": 20_48, "AI-Sweden/gpt-sw3-20b": 20_48, } class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : int = VOCAB_FILES_NAMES lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None: __UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase =kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) __UpperCAmelCase ="""None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCAmelCase ="""<|endoftext|>""" if eos_token is None else eos_token __UpperCAmelCase ="""<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCAmelCase =unk_token if pad_token is None else pad_token __UpperCAmelCase =eos_token if bos_token is None else bos_token else: __UpperCAmelCase ="""<pad>""" if pad_token is None else pad_token __UpperCAmelCase ="""<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =do_lower_case __UpperCAmelCase =remove_space __UpperCAmelCase =keep_accents __UpperCAmelCase =vocab_file __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace normalization in input texts # fmt : off __UpperCAmelCase ={""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # 
fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __UpperCAmelCase =re.compile( f'''[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' ) def __getstate__( self : Any ) -> str: __UpperCAmelCase =self.__dict__.copy() __UpperCAmelCase =None return state def __setstate__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase ={} __UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Union[str, Any] ) -> int: return len(self.sp_model ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str ) -> str: __UpperCAmelCase =self.non_printing_characters_re.sub("""""" , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __UpperCAmelCase ="""""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization __UpperCAmelCase =unicodedata.normalize("""NFC""" , __SCREAMING_SNAKE_CASE ) return text def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> int: return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> str: return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: return out_string def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> str: __UpperCAmelCase =[] __UpperCAmelCase ="""""" __UpperCAmelCase =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __UpperCAmelCase =True __UpperCAmelCase =[] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Any ) -> Dict[str, int]: __UpperCAmelCase ={self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi: __UpperCAmelCase =self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) 
return (out_vocab_file,) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __UpperCAmelCase =self.preprocess_text(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase =[self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __UpperCAmelCase =self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : str , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: __UpperCAmelCase =[f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()] __UpperCAmelCase =( f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__SCREAMING_SNAKE_CASE ) + f'''{self.bos_token}Bot:''' ) return self.encode(text=__SCREAMING_SNAKE_CASE )
68
1
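# A minimal standalone sketch of the preprocessing the GPT-SW3 tokenizer above
# performs before SentencePiece encoding: strip non-printing control characters,
# map exotic whitespace characters to plain spaces, then NFC-normalize. The
# whitespace set here is an illustrative subset (the tokenizer's full set is
# larger), and these names are not part of the transformers API.
import re
import unicodedata

NON_PRINTING_RE = re.compile(
    "[%s]" % "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))
)
WHITESPACES = {"\u2009", "\u200a", "\u202f", "\u2028", "\u2029"}  # illustrative subset

def preprocess_text(text: str) -> str:
    text = NON_PRINTING_RE.sub("", text)  # drop control chars, soft hyphen, zero-width space
    text = "".join(c if c not in WHITESPACES else " " for c in text)  # unify spaces
    return unicodedata.normalize("NFC", text)  # canonical composition

assert preprocess_text("a\u2009b\u200bc") == "a bc"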
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): a : List[str] = LongformerTokenizer a : Union[str, Any] = True a : str = LongformerTokenizerFast a : List[Any] = True def UpperCAmelCase_ ( self ) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] __lowerCAmelCase = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __lowerCAmelCase = {"unk_token": "<unk>"} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCamelCase ) ) def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCAmelCase_ ( self , **UpperCamelCase ) -> str: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCAmelCase_ ( self , UpperCamelCase ) -> int: __lowerCAmelCase = "lower newer" __lowerCAmelCase = "lower newer" return input_text, output_text def UpperCAmelCase_ ( self ) -> int: __lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCAmelCase = "lower newer" __lowerCAmelCase = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] __lowerCAmelCase = tokenizer.tokenize(UpperCamelCase ) # , add_prefix_space=True) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: __lowerCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=UpperCamelCase ) , [0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=UpperCamelCase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) __lowerCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase ) __lowerCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase ) __lowerCAmelCase = tokenizer.encode( "sequence builders" , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase ) __lowerCAmelCase = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase_ ( self ) -> Optional[Any]: __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = "Encode this sequence." __lowerCAmelCase = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments __lowerCAmelCase = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(UpperCamelCase , UpperCamelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) __lowerCAmelCase = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(UpperCamelCase , UpperCamelCase ) # Testing spaces after special tokens __lowerCAmelCase = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase )} ) # mask token has a left space __lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase ) __lowerCAmelCase = "Encode <mask> sequence" __lowerCAmelCase = "Encode <mask>sequence" __lowerCAmelCase = tokenizer.encode(UpperCamelCase ) __lowerCAmelCase = encoded.index(UpperCamelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(UpperCamelCase , UpperCamelCase ) __lowerCAmelCase = tokenizer.encode(UpperCamelCase ) __lowerCAmelCase = encoded.index(UpperCamelCase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(UpperCamelCase , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: pass def UpperCAmelCase_ ( self ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __lowerCAmelCase = "A, <mask> AllenNLP sentence." 
__lowerCAmelCase = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __lowerCAmelCase = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) __lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) __lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( UpperCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def UpperCAmelCase_ ( self ) -> List[str]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , UpperCamelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , UpperCamelCase ) self.assertEqual(post_processor_state["trim_offsets"] , UpperCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCAmelCase = "hello" # `hello` is a token in the vocabulary of `pretrained_name` __lowerCAmelCase = F'''{text_of_1_token} {text_of_1_token}''' __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , 
use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , ) __lowerCAmelCase = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase ) __lowerCAmelCase = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
39
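# The toy vocab and merges in setUp above drive a miniature byte-level BPE.
# A minimal sketch of how ranked merges apply to a word such as "lower":
# repeatedly merge the highest-priority adjacent symbol pair. Illustrative
# only; the real tokenizer layers byte-to-unicode mapping and caching on top.
def bpe(word, merges):
    ranks = {pair: i for i, pair in enumerate(merges)}  # lower index = higher priority
    symbols = list(word)
    while len(symbols) > 1:
        candidates = [
            (ranks.get((a, b), float("inf")), i)
            for i, (a, b) in enumerate(zip(symbols, symbols[1:]))
        ]
        best_rank, i = min(candidates)
        if best_rank == float("inf"):
            break  # no mergeable pair left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

merges = [("e", "r")]  # mirrors the "e r" line in the toy merges file
assert bpe("lower", merges) == ["l", "o", "w", "er"]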
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCAmelCase : List[Any] = get_logger(__name__) class UpperCAmelCase__ : def __init__( self , UpperCamelCase = None ) -> Union[str, Any]: __lowerCAmelCase = ( os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __lowerCAmelCase = Extractor def UpperCAmelCase_ ( self , UpperCamelCase ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path __lowerCAmelCase = os.path.abspath(UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> bool: return force_extract or ( not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase )) ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = False ) -> str: __lowerCAmelCase = self.extractor.infer_extractor_format(UpperCamelCase ) if not extractor_format: return input_path __lowerCAmelCase = self._get_output_path(UpperCamelCase ) if self._do_extract(UpperCamelCase , UpperCamelCase ): self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return output_path class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod @abstractmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: ... @staticmethod @abstractmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: ...
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): a : List[bytes] = [] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> List[Any]: with open(UpperCamelCase , "rb" ) as f: return f.read(UpperCamelCase ) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if not magic_number: __lowerCAmelCase = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: __lowerCAmelCase = cls.read_magic_number(UpperCamelCase , UpperCamelCase ) except OSError: return False return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase__ ( UpperCamelCase__ ): @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , **UpperCamelCase ) -> bool: return tarfile.is_tarfile(UpperCamelCase ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: def resolved(UpperCamelCase ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase ) ) def badpath(UpperCamelCase , UpperCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase ) def badlink(UpperCamelCase , UpperCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link __lowerCAmelCase = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase ) __lowerCAmelCase = resolved(UpperCamelCase ) for finfo in members: if badpath(finfo.name , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = tarfile.open(UpperCamelCase ) tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) ) tar_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x1F\x8B"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with gzip.open(UpperCamelCase , "rb" ) as gzip_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : List[Any] = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = b"" ) -> bool: if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase , "rb" ) as fp: __lowerCAmelCase = _EndRecData(UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __lowerCAmelCase = fp.read(UpperCamelCase ) # CD is where we expect it to be if len(UpperCamelCase ) == sizeCentralDir: __lowerCAmelCase = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with zipfile.ZipFile(UpperCamelCase , "r" ) as zip_file: zip_file.extractall(UpperCamelCase ) zip_file.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : Tuple = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with lzma.open(UpperCamelCase ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : str = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile" ) import rarfile os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) __lowerCAmelCase = rarfile.RarFile(UpperCamelCase ) rf.extractall(UpperCamelCase ) rf.close() class UpperCAmelCase__ ( UpperCamelCase__ ): a : int = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard" ) import zstandard as zstd __lowerCAmelCase = zstd.ZstdDecompressor() with open(UpperCamelCase , "rb" ) as ifh, open(UpperCamelCase , "wb" ) as ofh: dctx.copy_stream(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x42\x5A\x68"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: with bza.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr" ) import pyazr os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with pyazr.SevenZipFile(UpperCamelCase , "r" ) as archive: archive.extractall(UpperCamelCase ) class UpperCAmelCase__ ( UpperCamelCase__ ): a : Any = [B"""\x04\x22\x4D\x18"""] @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip 
install lz4" ) import lza.frame with lza.frame.open(UpperCamelCase , "rb" ) as compressed_file: with open(UpperCamelCase , "wb" ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class UpperCAmelCase__ : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) a : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCAmelCase_ ( cls ) -> Optional[Any]: return max( len(UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase , UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ) -> Dict: try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase ) except OSError: return b"" @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase = False ) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = cls.infer_extractor_format(UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase ) -> str: # <Added version="2.4.0"/> __lowerCAmelCase = cls._get_magic_number_max_length() __lowerCAmelCase = cls._read_magic_number(UpperCamelCase , UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return extractor_format @classmethod def UpperCAmelCase_ ( cls , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = "deprecated" , ) -> None: os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase ) # Prevent parallel extractions __lowerCAmelCase = str(Path(UpperCamelCase ).with_suffix(".lock" ) ) with FileLock(UpperCamelCase ): shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead." , category=UpperCamelCase , ) __lowerCAmelCase = extractor if extractor != "deprecated" else extractor_format else: __lowerCAmelCase = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase , UpperCamelCase ) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0." , category=UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase ): return extractor.extract(UpperCamelCase , UpperCamelCase )
39
1
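# The extractor registry above infers an archive format by sniffing leading
# "magic number" bytes. A minimal standalone sketch of the same idea with a
# small signature table; this is not the datasets API itself.
import gzip
import os
import tempfile

MAGIC = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "xz": [b"\xfd7zXZ\x00"],
    "bz2": [b"BZh"],
    "zstd": [b"\x28\xb5\x2f\xfd"],
}

def infer_format(path):
    max_len = max(len(sig) for sigs in MAGIC.values() for sig in sigs)
    with open(path, "rb") as f:
        head = f.read(max_len)  # only the first few bytes are needed
    for fmt, sigs in MAGIC.items():
        if any(head.startswith(sig) for sig in sigs):
            return fmt
    return None

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "sample.gz")
    with gzip.open(path, "wb") as f:
        f.write(b"hello")
    assert infer_format(path) == "gzip"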
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() __a = dict(zip(__A , range(len(__A ) ) ) ) __a = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } __a = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16_000, "return_attention_mask": False, "do_normalize": True, } __a = tempfile.mkdtemp() __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __a = os.path.join(self.tmpdirname , __A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) # load decoder from hub __a = "hf-internal-testing/ngram-beam-search-decoder" def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> Any: '''simple docstring''' __a = self.add_kwargs_tokens_map.copy() kwargs.update(__A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A ) def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> str: '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A ) def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> int: '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = self.get_tokenizer() __a = self.get_feature_extractor() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) processor.save_pretrained(self.tmpdirname ) __a = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __A ) def SCREAMING_SNAKE_CASE_ 
( self ) -> List[str]: '''simple docstring''' __a = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __a = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(__A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = self.get_feature_extractor() __a = self.get_tokenizer() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) __a = floats_list((3, 1_000) ) __a = feature_extractor(__A , return_tensors='''np''' ) __a = processor(__A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = self.get_feature_extractor() __a = self.get_tokenizer() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) __a = "This is a test string" __a = processor(text=__A ) __a = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=(2, 10, 16) , _snake_case=77 ) -> int: '''simple docstring''' np.random.seed(__A ) return np.random.rand(*__A ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = self.get_feature_extractor() __a = self.get_tokenizer() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) __a = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __a = processor.decode(__A ) __a = decoder.decode_beams(__A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.get_feature_extractor() __a = self.get_tokenizer() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) __a = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: __a = processor.batch_decode(__A ) else: with get_context(__A ).Pool() as pool: __a = processor.batch_decode(__A , __A ) __a = list(__A ) with get_context('''fork''' ).Pool() as p: __a = decoder.decode_beams_batch(__A , __A ) __a = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(__A , decoded_processor.logit_score ) self.assertListEqual(__A , decoded_processor.lm_score ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = self.get_feature_extractor() __a = self.get_tokenizer() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) __a = self._get_dummy_logits() __a = 15 __a = -20.0 __a = -4.0 __a = processor.batch_decode( __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , ) __a = decoded_processor_out.text __a = list(__A ) with get_context('''fork''' ).Pool() as pool: __a = decoder.decode_beams_batch( __A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , ) __a = [d[0][0] for d in decoded_decoder_out] __a = [d[0][2] for d in decoded_decoder_out] __a = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__A , __A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A ) self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1E-3 ) ) self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' __a = self.get_feature_extractor() __a = self.get_tokenizer() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) __a = self._get_dummy_logits() __a = 2.0 __a = 5.0 __a = -20.0 __a = True __a = processor.batch_decode( __A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , ) __a = decoded_processor_out.text __a = list(__A ) decoder.reset_params( alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , ) with get_context('''fork''' ).Pool() as pool: __a = decoder.decode_beams_batch( __A , __A , ) __a = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__A , __A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A ) __a = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , __A ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __a = processor.decoder.model_container[processor.decoder._model_key] __a = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __a = os.listdir(__A ) __a = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(__A , __A ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = snapshot_download('''hf-internal-testing/processor_with_lm''' ) __a = WavaVecaProcessorWithLM.from_pretrained(__A ) __a = processor.decoder.model_container[processor.decoder._model_key] __a = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __a = os.listdir(__A ) __a = os.listdir(__A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__A , __A ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __a = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __a = floats_list((3, 1_000) ) __a = processor_wavaveca(__A , return_tensors='''np''' ) __a = processor_auto(__A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) __a = self._get_dummy_logits() __a = processor_wavaveca.batch_decode(__A ) __a = processor_auto.batch_decode(__A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = self.get_feature_extractor() __a = self.get_tokenizer() __a = self.get_decoder() __a = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def SCREAMING_SNAKE_CASE_ ( _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = [d[key] for d in offsets] return retrieved_list def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __a = self._get_dummy_logits()[0] __a = processor.decode(__A , output_word_offsets=__A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__A , __A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' __a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __a = self._get_dummy_logits() __a = processor.batch_decode(__A , output_word_offsets=__A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__A , __A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) 
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' import torch __a = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A ) __a = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) ) __a = iter(__A ) __a = next(__A ) __a = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __a = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __a = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __a = model(__A ).logits.cpu().numpy() __a = processor.decode(logits[0] , output_word_offsets=__A ) __a = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __a = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] __a = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A ) self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text ) # output times __a = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) ) __a = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) ) # fmt: off __a = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) __a = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) ) self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
219
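# The slow test above converts CTC word offsets to seconds with
# time_offset = inputs_to_logits_ratio / sampling_rate. A minimal sketch of
# that arithmetic with made-up offsets; 320 is the wav2vec2-base downsampling
# factor (the product of its conv strides) and 16 kHz its sampling rate.
inputs_to_logits_ratio = 320
sampling_rate = 16_000
time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per logits frame

word_offsets = [
    {"word": "WHY", "start_offset": 71, "end_offset": 78},
    {"word": "DOES", "start_offset": 83, "end_offset": 95},
]
word_times = [
    {
        "word": d["word"],
        "start_time": d["start_offset"] * time_offset,  # ~1.42 s for "WHY"
        "end_time": d["end_offset"] * time_offset,  # ~1.56 s for "WHY"
    }
    for d in word_offsets
]
assert abs(word_times[0]["start_time"] - 1.42) < 1e-9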
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , __A : int , __A : int , __A : int , __A : Union[str, Any]=0.0 , __A : Optional[int] = None , __A : str = "geglu" , __A : Optional[int] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : str = "layer_norm" , __A : bool = False , ): super().__init__() snake_case__ : str = only_cross_attention snake_case__ : str = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" snake_case__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case__ : Tuple = AdaLayerNorm(__A , __A ) elif self.use_ada_layer_norm_zero: snake_case__ : Optional[Any] = AdaLayerNormZero(__A , __A ) else: snake_case__ : List[str] = nn.LayerNorm(__A , elementwise_affine=__A ) snake_case__ : Optional[Any] = Attention( query_dim=__A , heads=__A , dim_head=__A , dropout=__A , bias=__A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__A , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case__ : Union[str, Any] = ( AdaLayerNorm(__A , __A ) if self.use_ada_layer_norm else nn.LayerNorm(__A , elementwise_affine=__A ) ) snake_case__ : List[Any] = Attention( query_dim=__A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__A , dim_head=__A , dropout=__A , bias=__A , upcast_attention=__A , ) # is self-attn if encoder_hidden_states is none else: snake_case__ : Tuple = None snake_case__ : List[str] = None # 3. Feed-forward snake_case__ : Optional[int] = nn.LayerNorm(__A , elementwise_affine=__A ) snake_case__ : str = FeedForward(__A , dropout=__A , activation_fn=__A , final_dropout=__A ) # let chunk size default to None snake_case__ : List[Any] = None snake_case__ : str = 0 def _lowercase ( self : int , __A : Optional[int] , __A : int ): # Sets chunk feed-forward snake_case__ : Dict = chunk_size snake_case__ : str = dim def _lowercase ( self : List[str] , __A : torch.FloatTensor , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.LongTensor] = None , __A : Dict[str, Any] = None , __A : Optional[torch.LongTensor] = None , ): # Notice that normalization is always applied before the real computation in the following blocks. # 1. 
Self-Attention if self.use_ada_layer_norm: snake_case__ : Tuple = self.norma(__A , __A ) elif self.use_ada_layer_norm_zero: snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ : Dict = self.norma( __A , __A , __A , hidden_dtype=hidden_states.dtype ) else: snake_case__ : str = self.norma(__A ) snake_case__ : Union[str, Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case__ : Union[str, Any] = self.attna( __A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__A , **__A , ) if self.use_ada_layer_norm_zero: snake_case__ : str = gate_msa.unsqueeze(1 ) * attn_output snake_case__ : Dict = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case__ : List[str] = ( self.norma(__A , __A ) if self.use_ada_layer_norm else self.norma(__A ) ) snake_case__ : int = self.attna( __A , encoder_hidden_states=__A , attention_mask=__A , **__A , ) snake_case__ : Dict = attn_output + hidden_states # 3. Feed-forward snake_case__ : Any = self.norma(__A ) if self.use_ada_layer_norm_zero: snake_case__ : int = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case__ : Dict = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case__ : str = torch.cat( [self.ff(__A ) for hid_slice in norm_hidden_states.chunk(__A , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: snake_case__ : Dict = self.ff(__A ) if self.use_ada_layer_norm_zero: snake_case__ : List[Any] = gate_mlp.unsqueeze(1 ) * ff_output snake_case__ : str = ff_output + hidden_states return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , __A : int , __A : Optional[int] = None , __A : int = 4 , __A : float = 0.0 , __A : str = "geglu" , __A : bool = False , ): super().__init__() snake_case__ : str = int(dim * mult ) snake_case__ : Any = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case__ : Tuple = GELU(__A , __A ) if activation_fn == "gelu-approximate": snake_case__ : Union[str, Any] = GELU(__A , __A , approximate="tanh" ) elif activation_fn == "geglu": snake_case__ : Dict = GEGLU(__A , __A ) elif activation_fn == "geglu-approximate": snake_case__ : Dict = ApproximateGELU(__A , __A ) snake_case__ : Tuple = nn.ModuleList([] ) # project in self.net.append(__A ) # project dropout self.net.append(nn.Dropout(__A ) ) # project out self.net.append(nn.Linear(__A , __A ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(__A ) ) def _lowercase ( self : int , __A : Dict ): for module in self.net: snake_case__ : Optional[Any] = module(__A ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , __A : int , __A : int , __A : str = "none" ): super().__init__() snake_case__ : Optional[int] = nn.Linear(__A , __A ) snake_case__ : List[str] = approximate def _lowercase ( self : List[str] , __A : Optional[Any] ): if gate.device.type != "mps": return F.gelu(__A , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def _lowercase ( self : Any , __A : str ): snake_case__ : List[str] = self.proj(__A ) snake_case__ : List[Any] = self.gelu(__A ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : int , __A : int , __A : int ): super().__init__() snake_case__ : Any = nn.Linear(__A , dim_out * 2 ) def _lowercase ( self : Dict , __A : Optional[int] ): if gate.device.type != "mps": return F.gelu(__A ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def _lowercase ( self : str , __A : List[Any] ): snake_case__, snake_case__ : Optional[int] = self.proj(__A ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(__A ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , __A : int , __A : int ): super().__init__() snake_case__ : Optional[int] = nn.Linear(__A , __A ) def _lowercase ( self : Union[str, Any] , __A : Tuple ): snake_case__ : List[str] = self.proj(__A ) return x * torch.sigmoid(1.7_0_2 * x ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , __A : List[Any] , __A : str ): super().__init__() snake_case__ : Union[str, Any] = nn.Embedding(__A , __A ) snake_case__ : Optional[int] = nn.SiLU() snake_case__ : Optional[int] = nn.Linear(__A , embedding_dim * 2 ) snake_case__ : Tuple = nn.LayerNorm(__A , elementwise_affine=__A ) def _lowercase ( self : Any , __A : Dict , __A : str ): snake_case__ : Any = self.linear(self.silu(self.emb(__A ) ) ) snake_case__, snake_case__ : List[Any] = torch.chunk(__A , 2 ) snake_case__ : Tuple = self.norm(__A ) * (1 + scale) + shift return x class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : str , __A : int , __A : List[str] ): super().__init__() snake_case__ : List[str] = CombinedTimestepLabelEmbeddings(__A , __A ) snake_case__ : str = nn.SiLU() snake_case__ : Optional[Any] = nn.Linear(__A , 6 * embedding_dim , bias=__A ) snake_case__ : Tuple = nn.LayerNorm(__A , elementwise_affine=__A , eps=1e-6 ) def _lowercase ( self : Dict , __A : List[str] , __A : Optional[int] , __A : Any , __A : Optional[Any]=None ): snake_case__ : List[str] = self.linear(self.silu(self.emb(__A , __A , hidden_dtype=__A ) ) ) snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ : Optional[int] = emb.chunk(6 , dim=1 ) snake_case__ : List[Any] = self.norm(__A ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , __A : int , __A : int , __A : int , __A : Optional[str] = None , __A : float = 1e-5 ): super().__init__() snake_case__ : int = 
num_groups snake_case__ : Any = eps if act_fn is None: snake_case__ : Dict = None else: snake_case__ : Any = get_activation(__A ) snake_case__ : List[str] = nn.Linear(__A , out_dim * 2 ) def _lowercase ( self : str , __A : Optional[Any] , __A : Tuple ): if self.act: snake_case__ : List[Any] = self.act(__A ) snake_case__ : Union[str, Any] = self.linear(__A ) snake_case__ : Optional[int] = emb[:, :, None, None] snake_case__, snake_case__ : List[str] = emb.chunk(2 , dim=1 ) snake_case__ : Dict = F.group_norm(__A , self.num_groups , eps=self.eps ) snake_case__ : List[str] = x * (1 + scale) + shift return x
297
0
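# The `_chunk_size` branch in the transformer block above applies the
# feed-forward per sequence chunk to cap peak memory; because the FF acts on
# the last dimension only, chunking along the sequence axis is exact. A
# minimal PyTorch sketch of that equivalence.
import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))

def chunked_ff(hidden_states, chunk_size, dim=1):
    if hidden_states.shape[dim] % chunk_size != 0:
        raise ValueError("sequence length must be divisible by chunk_size")
    num_chunks = hidden_states.shape[dim] // chunk_size
    return torch.cat(
        [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=dim)], dim=dim
    )

x = torch.randn(2, 16, 8)
assert torch.allclose(chunked_ff(x, chunk_size=4), ff(x), atol=1e-6)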
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig a_ : int = logging.get_logger(__name__) a_ : Tuple = { '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''', # See all DPT models at https://huggingface.co/models?filter=dpt } class __lowercase( lowercase__ ): '''simple docstring''' __a : Optional[Any] = 'dpt' def __init__( self , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-12 , __a=384 , __a=16 , __a=3 , __a=False , __a=True , __a=[2, 5, 8, 11] , __a="project" , __a=[4, 2, 1, 0.5] , __a=[96, 192, 384, 768] , __a=256 , __a=-1 , __a=False , __a=True , __a=0.4 , __a=255 , __a=0.1 , __a=[1, 1024, 24, 24] , __a=[0, 1] , __a=None , **__a , ): super().__init__(**__a ) __lowerCamelCase : str = hidden_size __lowerCamelCase : int = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) __lowerCamelCase : Tuple = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } __lowerCamelCase : str = BitConfig(**__a ) elif isinstance(__a , __a ): logger.info('Initializing the config with a `BiT` backbone.' ) __lowerCamelCase : Tuple = BitConfig(**__a ) elif isinstance(__a , __a ): __lowerCamelCase : List[Any] = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) __lowerCamelCase : Union[str, Any] = backbone_featmap_shape __lowerCamelCase : Tuple = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' ) else: __lowerCamelCase : Dict = None __lowerCamelCase : Any = None __lowerCamelCase : List[Any] = [] __lowerCamelCase : Dict = num_hidden_layers __lowerCamelCase : Tuple = num_attention_heads __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : List[str] = hidden_dropout_prob __lowerCamelCase : Dict = attention_probs_dropout_prob __lowerCamelCase : Union[str, Any] = initializer_range __lowerCamelCase : List[Any] = layer_norm_eps __lowerCamelCase : Any = image_size __lowerCamelCase : Tuple = patch_size __lowerCamelCase : Dict = num_channels __lowerCamelCase : Dict = qkv_bias __lowerCamelCase : List[Any] = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) __lowerCamelCase : int = readout_type __lowerCamelCase : Optional[Any] = reassemble_factors __lowerCamelCase : str = neck_hidden_sizes __lowerCamelCase : str = fusion_hidden_size __lowerCamelCase : Any = head_in_index __lowerCamelCase : List[Any] = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __lowerCamelCase : Any = use_auxiliary_head __lowerCamelCase : Tuple = auxiliary_loss_weight __lowerCamelCase : int = semantic_loss_ignore_index __lowerCamelCase : int = semantic_classifier_dropout def snake_case_ ( self ): __lowerCamelCase : str = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __lowerCamelCase : Dict = self.backbone_config.to_dict() __lowerCamelCase : Union[str, Any] = self.__class__.model_type return output
708
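# The hybrid branch of the config above resolves `backbone_config` in three
# steps: None falls back to a default BiT dict, a plain dict is promoted to a
# config object, and a config object passes through. A minimal sketch of that
# resolution order with a stand-in class; these names are illustrative, not
# the transformers API.
class BackboneConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

DEFAULT_BACKBONE = {"layer_type": "bottleneck", "depths": [3, 4, 9]}

def resolve_backbone(backbone_config):
    if backbone_config is None:
        return BackboneConfig(**DEFAULT_BACKBONE)  # fall back to the default dict
    if isinstance(backbone_config, dict):
        return BackboneConfig(**backbone_config)  # promote dict to config object
    if isinstance(backbone_config, BackboneConfig):
        return backbone_config  # already a config object
    raise ValueError(f"backbone_config must be a dict or BackboneConfig, got {type(backbone_config)}")

assert resolve_backbone(None).depths == [3, 4, 9]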
"""simple docstring""" import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase( lowercase__ ): '''simple docstring''' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): __lowerCamelCase : List[str] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : str = seq_length __lowerCamelCase : Optional[int] = is_training __lowerCamelCase : Dict = use_input_mask __lowerCamelCase : Dict = use_token_type_ids __lowerCamelCase : Dict = use_labels __lowerCamelCase : Optional[Any] = vocab_size __lowerCamelCase : Any = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Tuple = num_attention_heads __lowerCamelCase : Any = intermediate_size __lowerCamelCase : Optional[Any] = hidden_act __lowerCamelCase : Any = hidden_dropout_prob __lowerCamelCase : Optional[Any] = attention_probs_dropout_prob __lowerCamelCase : List[Any] = max_position_embeddings __lowerCamelCase : Optional[Any] = type_vocab_size __lowerCamelCase : Dict = type_sequence_label_size __lowerCamelCase : Any = initializer_range __lowerCamelCase : Union[str, Any] = num_labels __lowerCamelCase : Tuple = num_choices __lowerCamelCase : str = relative_attention __lowerCamelCase : Optional[int] = position_biased_input __lowerCamelCase : int = pos_att_type __lowerCamelCase : str = scope def snake_case_ ( self ): __lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase : int = None if self.use_input_mask: __lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __lowerCamelCase : Tuple = None if self.use_token_type_ids: __lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase : Optional[Any] = None __lowerCamelCase : Optional[Any] = None __lowerCamelCase : int = None if self.use_labels: __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ ( self ): return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def snake_case_ ( self , __a ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : int = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : str = model(__a , attention_mask=__a , token_type_ids=__a )[0] __lowerCamelCase : str = model(__a , token_type_ids=__a )[0] __lowerCamelCase : Optional[Any] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : List[str] = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[int] = self.num_labels __lowerCamelCase : List[Any] = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : int = self.num_labels __lowerCamelCase : Dict = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : int = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Optional[Any] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : Any = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCamelCase : Any = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase : List[Any] = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case_ ( self ): __lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : List[str] = config_and_inputs __lowerCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' __a : Dict = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) __a : Tuple = ( { 'feature-extraction': DebertaVaModel, 'fill-mask': DebertaVaForMaskedLM, 'question-answering': DebertaVaForQuestionAnswering, 'text-classification': DebertaVaForSequenceClassification, 'token-classification': DebertaVaForTokenClassification, 'zero-shot': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) __a : str = True __a : Dict = False __a : Tuple = False __a : Optional[Any] = False __a : List[Any] = False def snake_case_ ( self ): __lowerCamelCase : List[str] = DebertaVaModelTester(self ) __lowerCamelCase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def snake_case_ ( self ): self.config_tester.run_common_tests() def snake_case_ ( self ): __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def snake_case_ ( self ): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def snake_case_ ( self ): __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def snake_case_ ( self ): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def snake_case_ ( self ): __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def snake_case_ ( self ): __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def snake_case_ ( self ): for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __lowercase( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet' ) def snake_case_ ( self ): pass @slow def snake_case_ ( self ): __lowerCamelCase : Any = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __lowerCamelCase : Any = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) __lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __lowerCamelCase : str = torch.tensor( [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
263
0
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def A__ ( ) -> Dict: '''simple docstring''' _UpperCAmelCase = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" _UpperCAmelCase = Image.open(requests.get(A__ , stream=A__ ).raw ).convert("RGB" ) return image def A__ ( A__ ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def A__ ( A__ , A__ , A__ ) -> Tuple: 
'''simple docstring''' _UpperCAmelCase = dct.pop(A__ ) _UpperCAmelCase = val def A__ ( A__ , A__ ) -> int: '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _UpperCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) _UpperCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict _UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(A__ , requires_grad=A__ ), v_bias) ) _UpperCAmelCase = qkv_bias def A__ ( A__ ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = 364 if "coco" in model_name else 224 _UpperCAmelCase = InstructBlipVisionConfig(image_size=A__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: _UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _UpperCAmelCase = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: _UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=3_2001 ).to_dict() elif "vicuna-13b" in model_name: _UpperCAmelCase = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=3_2001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 _UpperCAmelCase = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict() _UpperCAmelCase = InstructBlipConfig(vision_config=A__ , text_config=A__ , qformer_config=A__ ) return config, image_size @torch.no_grad() def A__ ( A__ , A__=None , A__=False ) -> Dict: '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: _UpperCAmelCase = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) _UpperCAmelCase = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) _UpperCAmelCase , _UpperCAmelCase = get_blipa_config(A__ ) _UpperCAmelCase = InstructBlipForConditionalGeneration(A__ ).eval() _UpperCAmelCase = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } _UpperCAmelCase , _UpperCAmelCase = model_name_to_original[model_name] # load original model print("Loading original model..." 
) _UpperCAmelCase = "cuda:1" if torch.cuda.is_available() else "cpu" _UpperCAmelCase = "cuda:2" if torch.cuda.is_available() else "cpu" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = load_model_and_preprocess( name=A__ , model_type=A__ , is_eval=A__ , device=A__ ) original_model.eval() print("Done!" ) # update state dict keys _UpperCAmelCase = original_model.state_dict() _UpperCAmelCase = create_rename_keys(A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _UpperCAmelCase = state_dict.pop(A__ ) if key.startswith("Qformer.bert" ): _UpperCAmelCase = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: _UpperCAmelCase = key.replace("self" , "attention" ) if "llm_proj" in key: _UpperCAmelCase = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: _UpperCAmelCase = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): _UpperCAmelCase = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): _UpperCAmelCase = key.replace("t5" , "language" ) _UpperCAmelCase = val # read in qv biases read_in_q_v_bias(A__ , A__ ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(A__ , strict=A__ ) _UpperCAmelCase = load_demo_image() _UpperCAmelCase = "What is unusual about this image?" # create processor _UpperCAmelCase = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=A__ , image_std=A__ ) _UpperCAmelCase = InstructBlipProcessor( image_processor=A__ , tokenizer=A__ , qformer_tokenizer=A__ , ) _UpperCAmelCase = processor(images=A__ , text=A__ , return_tensors="pt" ).to(A__ ) # make sure processor creates exact same pixel values _UpperCAmelCase = vis_processors["eval"](A__ ).unsqueeze(0 ).to(A__ ) _UpperCAmelCase = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A__ ) original_model.to(A__ ) hf_model.to(A__ ) with torch.no_grad(): if "vicuna" in model_name: _UpperCAmelCase = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits _UpperCAmelCase = hf_model(**A__ ).logits else: _UpperCAmelCase = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits _UpperCAmelCase = tokenizer("\n" , return_tensors="pt" ).input_ids.to(A__ ) _UpperCAmelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) _UpperCAmelCase = hf_model(**A__ , labels=A__ ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape _UpperCAmelCase = 1E-4 if "vicuna" in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , A__ , atol=A__ ) print("Looks ok!" ) print("Generating with original model..." ) _UpperCAmelCase = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) _UpperCAmelCase = hf_model.generate( **A__ , do_sample=A__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
_UpperCAmelCase = 2 print("Original generation:" , A__ ) _UpperCAmelCase = processor.batch_decode(A__ , skip_special_tokens=A__ ) _UpperCAmelCase = [text.strip() for text in output_text] print("HF generation:" , A__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(A__ ) hf_model.save_pretrained(A__ ) if push_to_hub: processor.push_to_hub(F"""Salesforce/{model_name}""" ) hf_model.push_to_hub(F"""Salesforce/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() SCREAMING_SNAKE_CASE_ = [ '''instructblip-vicuna-7b''', '''instructblip-vicuna-13b''', '''instructblip-flan-t5-xl''', '''instructblip-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''instructblip-flan-t5-xl''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
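# --- Editor's note: example invocation (a sketch; paths are placeholders and the
# script file name is assumed from the repo layout). The flags and model-name
# choices come directly from the argparse block above:
#
#   pip install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub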
426
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a : """simple docstring""" def __init__( self , snake_case_ , snake_case_=13 , snake_case_=32 , snake_case_=3 , snake_case_=4 , snake_case_=[10, 20, 30, 40] , snake_case_=[2, 2, 3, 2] , snake_case_=True , snake_case_=True , snake_case_=37 , snake_case_="gelu" , snake_case_=10 , snake_case_=0.02 , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=None , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = num_stages _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = num_labels _UpperCAmelCase = initializer_range _UpperCAmelCase = out_features _UpperCAmelCase = out_indices _UpperCAmelCase = scope def __A ( self ) -> Tuple: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def __A ( self ) -> int: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> str: _UpperCAmelCase = ConvNextVaModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]: _UpperCAmelCase = ConvNextVaForImageClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> str: _UpperCAmelCase = ConvNextVaBackbone(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) 
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _UpperCAmelCase = None _UpperCAmelCase = ConvNextVaBackbone(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(snake_case_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __A ( self ) -> List[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict def __A ( self ) -> Tuple: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class a ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ): """simple docstring""" A__ : Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) A__ : Optional[Any] = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) A__ : Any = False A__ : str = False A__ : List[str] = False A__ : Optional[int] = False A__ : Dict = False def __A ( self ) -> List[Any]: _UpperCAmelCase = ConvNextVaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 ) def __A ( self ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self ) -> Any: return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def __A ( self ) -> int: pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def __A ( self ) -> Union[str, Any]: pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def __A ( self ) -> Optional[int]: pass def __A ( self ) -> Optional[Any]: if not self.model_tester.is_training: return for model_class in self.all_model_classes: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels() _UpperCAmelCase = True if model_class.__name__ in [ *get_values(snake_case_ ), *get_values(snake_case_ ), ]: continue _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) _UpperCAmelCase = model(**snake_case_ ).loss loss.backward() def __A ( self ) -> str: if not self.model_tester.is_training: return for model_class in self.all_model_classes: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels() 
_UpperCAmelCase = False _UpperCAmelCase = True if ( model_class.__name__ in [*get_values(snake_case_ ), *get_values(snake_case_ )] or not model_class.supports_gradient_checkpointing ): continue _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.gradient_checkpointing_enable() model.train() _UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) _UpperCAmelCase = model(**snake_case_ ).loss loss.backward() def __A ( self ) -> Tuple: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) def __A ( self ) -> int: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def __A ( self ) -> Optional[int]: def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ): _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) _UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCAmelCase = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def __A ( self ) -> Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def __A ( self ) -> Dict: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = ConvNextVaModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def A__ ( ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self ) -> Dict: return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def __A ( self ) -> int: _UpperCAmelCase = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(snake_case_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = preprocessor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) _UpperCAmelCase = torch.tensor([0.99_96, 
0.19_66, -0.43_86] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
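# --- Editor's sketch: the integration test above as a standalone snippet ---
# Uses the same public checkpoint as the test; class names are the standard
# transformers ones and may vary slightly across library versions.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
print(model.config.id2label[logits.argmax(-1).item()])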
426
1
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract the start/end time and duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
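# --- Editor's sketch: invoking the utility above ---
# From the command line (the script file name is assumed):
#
#   python get_github_job_time.py --workflow_run_id 1234567890
#
# Calling the function directly lets you pass a token, which the CLI above does
# not wire up (the token value below is a placeholder):
#
#   times = get_job_time("1234567890", token="ghp_...")
#   print(sorted(times.items(), key=lambda kv: kv[1]["duration"], reverse=True)[:5])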
720
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
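# --- Editor's sketch: inspecting the ONNX config above without exporting ---
# A minimal look at the axis mappings, assuming the two classes in this module
# (as named above) can be instantiated standalone; the opset and atol values
# come from the properties defined above.
config = LongformerConfig()
onnx_config = LongformerOnnxConfig(config, task="default")
print(onnx_config.inputs)              # input_ids / attention_mask / global_attention_mask
print(onnx_config.outputs)             # adds a batch-only pooler output for "default"
print(onnx_config.default_onnx_opset)  # at least 14
print(onnx_config.atol_for_validation)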
298
0
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings,
    returning its length and one such subsequence.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # walk the table backwards to recover one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    # the expected values were previously unused; check them explicitly
    assert ln == expected_ln
    assert subseq == expected_subseq

    import doctest

    doctest.testmod()
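# --- Editor's sketch: brute-force cross-check for the DP above ---
# Enumerates subsequences of x from longest to shortest (exponential, so tiny
# inputs only) and returns the length of the longest one that also occurs in y.
from itertools import combinations


def lcs_brute_force(x: str, y: str) -> int:
    def is_subsequence(s: str, t: str) -> bool:
        it = iter(t)
        return all(ch in it for ch in s)  # `in` advances the iterator

    for r in range(len(x), 0, -1):
        if any(is_subsequence("".join(c), y) for c in combinations(x, r)):
            return r
    return 0


assert lcs_brute_force("AGGTAB", "GXTXAYB") == longest_common_subsequence("AGGTAB", "GXTXAYB")[0]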
10
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares (Project Euler problem 6), using the
    closed forms 1^3 + ... + n^3 = (n(n+1)/2)^2 and 1^2 + ... + n^2 = n(n+1)(2n+1)/6.
    """
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
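# --- Editor's sketch: brute-force check of the closed-form solution above ---
# solution(n) is "square of the sum minus sum of the squares"; note that
# (n * (n + 1) // 2) ** 2 is simultaneously the sum of the first n cubes,
# which is where the variable name comes from.
def solution_brute_force(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert solution(10) == solution_brute_force(10) == 2640
assert solution(100) == solution_brute_force(100)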
10
1
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """Copy the weights of an old-structure ProphetNet checkpoint into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these two comparisons were bare expressions (no-ops); make them real checks
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
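# --- Editor's note: example invocation (paths are placeholders; the script file
# name is assumed from the transformers repo layout). Both flags come from the
# parser above; checkpoints with "xprophetnet" in the path take the XLM branch:
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path /path/to/old_prophetnet_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_folder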
324
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=1_3 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=9_9 , UpperCamelCase__ : List[Any]=6_4 , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : int=4 , UpperCamelCase__ : List[Any]=3_7 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=0.0_2 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : int=4 , UpperCamelCase__ : List[str]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = embedding_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def A ( self : int ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Any ): """simple docstring""" return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def A ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = MegatronBertModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) UpperCamelCase = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = MegatronBertForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = MegatronBertForCausalLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : str ): """simple docstring""" UpperCamelCase = MegatronBertForNextSentencePrediction(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ): """simple docstring""" UpperCamelCase = MegatronBertForPreTraining(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , next_sentence_label=UpperCamelCase__ , ) 
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict ): """simple docstring""" UpperCamelCase = MegatronBertForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MegatronBertForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MegatronBertForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = self.num_choices UpperCamelCase = MegatronBertForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : str ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): 
"""simple docstring""" _SCREAMING_SNAKE_CASE = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True # test_resize_embeddings = False _SCREAMING_SNAKE_CASE = False def A ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str=False ): """simple docstring""" UpperCamelCase = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class in get_values(UpperCamelCase__ ): UpperCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ ) UpperCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) return inputs_dict def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = MegatronBertModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 ) def A ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*UpperCamelCase__ ) def A ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCamelCase__ ) def A ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCamelCase__ ) def A ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCamelCase__ ) def A ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCamelCase__ ) def A ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCamelCase__ ) def __lowerCamelCase ( A__ ) -> Union[str, Any]: """simple docstring""" return torch.tensor( A__ , dtype=torch.long , device=A__ , ) _lowerCamelCase : List[str] = 1e-4 @require_torch 
@require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip('Model is not available.' ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: UpperCamelCase = os.path.join(os.environ['MYDIR'] , UpperCamelCase__ ) UpperCamelCase = MegatronBertModel.from_pretrained(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.half() UpperCamelCase = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] ) with torch.no_grad(): UpperCamelCase = model(UpperCamelCase__ )[0] UpperCamelCase = torch.Size((1, 9, 1_0_2_4) ) self.assertEqual(output.shape , UpperCamelCase__ ) UpperCamelCase = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8] for ii in range(3 ): for jj in range(3 ): UpperCamelCase = output[0, ii, jj] UpperCamelCase = expected[3 * ii + jj] UpperCamelCase = 'ii={} jj={} a={} b={}'.format(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertTrue(math.isclose(UpperCamelCase__ , UpperCamelCase__ , rel_tol=UpperCamelCase__ , abs_tol=UpperCamelCase__ ) , msg=UpperCamelCase__ )
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area of the circle to the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The math module's value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    # Monte Carlo integration: mean sample value times the interval width.
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        # y = sqrt(4 - x^2) is the upper half of a circle of radius 2,
        # so its integral from 0 to 2 is exactly pi.
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
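A quick way to exercise the estimators above; the iteration counts are illustrative, and larger counts shrink the variance of the Monte Carlo estimates:

    pi_estimator(100_000)                        # expect roughly 3.14, off by a few thousandths
    area_under_line_estimator_check(100_000)     # the integral of y=x on [0, 1] is 0.5
    pi_estimator_using_area_under_curve(100_000)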
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCAmelCase: Optional[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') lowerCAmelCase: Optional[int] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCAmelCase: int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a__: lowercase__ = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) lowercase__ = field( default=lowerCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowercase__ = field( default=lowerCamelCase__ , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , ) lowercase__ = field(default=lowerCamelCase__ , metadata={"""help""": """A folder containing the training data."""} ) lowercase__ = field(default=lowerCamelCase__ , metadata={"""help""": """A folder containing the validation data."""} ) lowercase__ = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) lowercase__ = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""} ) lowercase__ = field( default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , ) lowercase__ = field( default=lowerCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowercase__ = field( default=lowerCamelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def lowercase_ ( self : List[Any] ): a : Any = {} if self.train_dir is not None: a : Dict = self.train_dir if self.validation_dir is not None: a : Union[str, Any] = self.validation_dir a : Any = data_files if data_files else None @dataclass class a__: lowercase__ = field( default=lowerCamelCase__ , metadata={ """help""": ( """The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """ """checkpoint identifier on the hub. 
""" """Don't set if you want to train a model from scratch.""" ) } , ) lowercase__ = field( default=lowerCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowerCamelCase__ )} , ) lowercase__ = field( default=lowerCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) lowercase__ = field( default=lowerCamelCase__ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) lowercase__ = field( default=lowerCamelCase__ , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , ) lowercase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowercase__ = field(default=lowerCamelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} ) lowercase__ = field( default=lowerCamelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) lowercase__ = field( default=lowerCamelCase__ , metadata={ """help""": ( """The size (resolution) of each image. If not specified, will use `image_size` of the configuration.""" ) } , ) lowercase__ = field( default=lowerCamelCase__ , metadata={ """help""": ( """The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.""" ) } , ) lowercase__ = field( default=lowerCamelCase__ , metadata={"""help""": """Stride to use for the encoder."""} , ) class a__: def __init__( self : List[str] , __snake_case : int=1_92 , __snake_case : int=32 , __snake_case : List[str]=4 , __snake_case : Union[str, Any]=0.6 ): a : Any = input_size a : Union[str, Any] = mask_patch_size a : int = model_patch_size a : Tuple = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('Input size must be divisible by mask patch size' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('Mask patch size must be divisible by model patch size' ) a : str = self.input_size // self.mask_patch_size a : Union[str, Any] = self.mask_patch_size // self.model_patch_size a : str = self.rand_size**2 a : Tuple = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : str ): a : List[str] = np.random.permutation(self.token_count )[: self.mask_count] a : List[str] = np.zeros(self.token_count , dtype=__snake_case ) a : Any = 1 a : List[str] = mask.reshape((self.rand_size, self.rand_size) ) a : Tuple = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def lowerCamelCase__ ( _A ): a : str = torch.stack([example['pixel_values'] for example in examples] ) a : List[str] = torch.stack([example['mask'] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def lowerCamelCase__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
a : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a : int = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mim' , _A , _A ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() a : int = training_args.get_process_log_level() logger.setLevel(_A ) transformers.utils.logging.set_verbosity(_A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. a : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. a : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. a : Union[str, Any] = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _A ) and data_args.train_val_split > 0.0: a : Any = ds['train'].train_test_split(data_args.train_val_split ) a : str = split['train'] a : Any = split['test'] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a : Tuple = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: a : Any = AutoConfig.from_pretrained(model_args.config_name_or_path , **_A ) elif model_args.model_name_or_path: a : Optional[int] = AutoConfig.from_pretrained(model_args.model_name_or_path , **_A ) else: a : Optional[int] = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_A , 'decoder_type' ): a : List[str] = 'simmim' # adapt config a : str = model_args.image_size if model_args.image_size is not None else config.image_size a : Union[str, Any] = model_args.patch_size if model_args.patch_size is not None else config.patch_size a : Any = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { 'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: a : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_A ) elif model_args.model_name_or_path: a : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_A ) else: a : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } a : int = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: a : Tuple = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) a : Union[str, Any] = AutoModelForMaskedImageModeling.from_config(_A ) if training_args.do_train: a : Tuple = ds['train'].column_names else: a : Dict = ds['validation'].column_names if data_args.image_column_name is not None: a : Optional[Any] = data_args.image_column_name elif "image" in column_names: a : str = 'image' elif "img" in column_names: a : Union[str, Any] = 'img' else: a : str = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py a : Optional[int] = Compose( [ Lambda(lambda _A : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator a : Dict = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_A ): a : Dict = [transforms(_A ) for image in examples[image_column_name]] a : Optional[int] = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a 
train dataset' ) if data_args.max_train_samples is not None: a : List[Any] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_A ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: a : str = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_A ) # Initialize our trainer a : List[str] = Trainer( model=_A , args=_A , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_A , data_collator=_A , ) # Training if training_args.do_train: a : str = None if training_args.resume_from_checkpoint is not None: a : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: a : Dict = last_checkpoint a : Optional[int] = trainer.train(resume_from_checkpoint=_A ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: a : List[Any] = trainer.evaluate() trainer.log_metrics('eval' , _A ) trainer.save_metrics('eval' , _A ) # Write model card and (optionally) push to hub a : str = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'masked-image-modeling', 'dataset': data_args.dataset_name, 'tags': ['masked-image-modeling'], } if training_args.push_to_hub: trainer.push_to_hub(**_A ) else: trainer.create_model_card(**_A ) if __name__ == "__main__": main()
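A small sketch of the MaskGenerator defined earlier in this script, assuming the obfuscated np.zeros dtype resolves to int as in the original SimMIM code; the sizes shown are the class defaults:

    gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = gen()                # flat mask over (192/4)**2 = 2304 model-patch positions
    print(mask.shape)           # torch.Size([2304])
    print(int(mask.sum()))      # ceil(36 * 0.6) = 22 mask patches * (32//4)**2 = 1408 masked positions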
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCAmelCase : str =logging.get_logger(__name__) class _lowercase (SCREAMING_SNAKE_CASE__ ): '''simple docstring''' lowercase__ = ["""pixel_values"""] def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' super().__init__(**_lowercase ) UpperCamelCase_ = size if size is not None else {"""shortest_edge""": 384} UpperCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase ) UpperCamelCase_ = do_resize UpperCamelCase_ = size # Default value set here for backwards compatibility where the value in config is None UpperCamelCase_ = crop_pct if crop_pct is not None else 224 / 256 UpperCamelCase_ = resample UpperCamelCase_ = do_rescale UpperCamelCase_ = rescale_factor UpperCamelCase_ = do_normalize UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ): '''simple docstring''' UpperCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase ) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}""" ) UpperCamelCase_ = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct UpperCamelCase_ = int(shortest_edge / crop_pct ) UpperCamelCase_ = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase ) UpperCamelCase_ = resize(image=_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_lowercase , size=(shortest_edge, shortest_edge) , data_format=_lowercase , **_lowercase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _lowercase , size=(shortest_edge, shortest_edge) , resample=_lowercase , data_format=_lowercase , **_lowercase ) def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): '''simple docstring''' return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): '''simple docstring''' return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def _lowerCamelCase ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ): '''simple docstring''' UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize UpperCamelCase_ = crop_pct if crop_pct is not None else self.crop_pct UpperCamelCase_ = resample if resample is not None else self.resample UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean UpperCamelCase_ = image_std if image_std is not None else self.image_std UpperCamelCase_ = size if size is not None else self.size UpperCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase ) UpperCamelCase_ = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
UpperCamelCase_ = [to_numpy_array(_lowercase ) for image in images] if do_resize: UpperCamelCase_ = [self.resize(image=_lowercase , size=_lowercase , crop_pct=_lowercase , resample=_lowercase ) for image in images] if do_rescale: UpperCamelCase_ = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images] if do_normalize: UpperCamelCase_ = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images] UpperCamelCase_ = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] UpperCamelCase_ = {"""pixel_values""": images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
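If the obfuscated method names above are restored (the four helpers were presumably resize, rescale, normalize, and preprocess before they were all renamed to _lowerCamelCase, which makes the later defs shadow the earlier ones), a minimal preprocessing call would look like this sketch:

    from PIL import Image
    import numpy as np

    processor = _lowercase(size={"shortest_edge": 384})  # the (obfuscated) class defined above
    image = Image.fromarray((np.random.rand(256, 320, 3) * 255).astype("uint8"))
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384): shortest_edge >= 384 takes the plain-resize branch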
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` decimal digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Each term of the Chudnovsky series contributes roughly 14 digits.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last digit, which may be inaccurate at this precision.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
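For reference, the loop above sums the Chudnovsky series; the constants 13591409, 545140134 and -262537412640768000 = -640320^3 come straight from it, and the code's constant_term uses the identity 426880\sqrt{10005} = 640320^{3/2}/12:

\[
\frac{1}{\pi} \;=\; 12 \sum_{k=0}^{\infty} \frac{(-1)^k\,(6k)!\,\bigl(13591409 + 545140134\,k\bigr)}{(3k)!\,(k!)^3\,640320^{3k+3/2}}
\]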
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase__ ( _lowerCAmelCase , unittest.TestCase ): """simple docstring""" A__ : str = GPTaTokenizer A__ : int = GPTaTokenizerFast A__ : Optional[int] = True A__ : List[str] = {"add_prefix_space": True} A__ : List[str] = False def snake_case__ ( self ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] A__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) A__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] A__ = {"unk_token": "<unk>"} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE__ ) ) def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Dict: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: A__ = "lower newer" A__ = "lower newer" return input_text, output_text def snake_case__ ( self ) -> Any: A__ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A__ = "lower newer" A__ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] A__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = tokens + [tokenizer.unk_token] A__ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) A__ = "lower newer" # Testing tokenization A__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) A__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens A__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) A__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens A__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) A__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) A__ = 
rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing the unknown token A__ = tokens + [rust_tokenizer.unk_token] A__ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def snake_case__ ( self , SCREAMING_SNAKE_CASE__=15 ) -> Optional[int]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Simple input A__ = "This is a simple input" A__ = ["This is a simple input 1", "This is a simple input 2"] A__ = ("This is a simple input", "This is a pair") A__ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="max_length" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="max_length" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="max_length" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="max_length" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="max_length" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="max_length" , ) def snake_case__ ( self ) -> Optional[int]: A__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input A__ = "This is a simple input" A__ = ["This is a simple input looooooooong", "This is a simple input"] A__ = ("This is a simple input", "This is a pair") A__ = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] A__ = tokenizer.pad_token_id A__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=30 , return_tensors="np" ) A__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="np" ) A__ = tokenizer(*SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=60 , return_tensors="np" ) A__ = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in 
out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def snake_case__ ( self ) -> Optional[Any]: A__ = "$$$" A__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ ) A__ = "This is a simple input" A__ = ["This is a simple input 1", "This is a simple input 2"] A__ = tokenizer.bos_token_id A__ = tokenizer(SCREAMING_SNAKE_CASE__ ) A__ = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) A__ = tokenizer.decode(out_s.input_ids ) A__ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def snake_case__ ( self ) -> Any: pass def snake_case__ ( self ) -> Optional[int]: # TODO: change to self.get_tokenizers() when the fast version is implemented A__ = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A__ = "Encode this." A__ = "This one too please." 
A__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) A__ = tokenizer.encode_plus( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , ) A__ = encoded_sequence_dict["input_ids"] A__ = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) A__ = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE__ ) ] A__ = [x for x in filtered_sequence if x is not None] self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self ) -> List[str]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 A__ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE__ ) A__ = "A photo of a cat" A__ = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) A__ = AutoTokenizer.from_pretrained("./test_opt" ) A__ = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 250, 1345, 9, 10, 4758] ) def snake_case__ ( self ) -> List[str]: A__ = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE__ ) A__ = "A photo of a cat" A__ = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) # Same as above self.assertEqual(SCREAMING_SNAKE_CASE__ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def snake_case__ ( self ) -> List[Any]: A__ = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE__ ) A__ = "bos" A__ = tokenizer.get_vocab()["bos"] A__ = "A photo of a cat" A__ = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) # We changed the bos token self.assertEqual(SCREAMING_SNAKE_CASE__ , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) A__ = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) A__ = tokenizer.encode( SCREAMING_SNAKE_CASE__ , ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [31957, 250, 1345, 9, 10, 4758] )
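For context, a short sketch of the byte-level BPE behaviour these tests exercise (the checkpoint name is illustrative, and the exact split depends on the vocabulary and merges):

    from transformers import GPT2Tokenizer

    tok = GPT2Tokenizer.from_pretrained("gpt2")
    print(tok.tokenize("lower newer"))
    # something like ['lower', 'Ġnewer']; 'Ġ' (\u0120) is how byte-level BPE marks a leading space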
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase__ :Optional[Any] = get_tests_dir('''fixtures''') class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=_SCREAMING_SNAKE_CASE ) as mock_head: _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class __a ( unittest.TestCase ): @classmethod def UpperCAmelCase__ ( cls ) -> Tuple: """simple docstring""" _UpperCAmelCase = TOKEN HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( cls ) -> Tuple: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) # Reset repo delete_repo(token=self._token , repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _SCREAMING_SNAKE_CASE , repo_id='test-feature-extractor' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): 
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _SCREAMING_SNAKE_CASE , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() _UpperCAmelCase = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , ) _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
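The push/reload round trip these tests assert can be sketched as follows (the repo id and token are placeholders, and use_auth_token reflects the transformers API of this era):

    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
    extractor.push_to_hub("my-feature-extractor", use_auth_token="hf_...")  # placeholder token
    reloaded = Wav2Vec2FeatureExtractor.from_pretrained("<user>/my-feature-extractor")
    assert reloaded.to_dict() == extractor.to_dict()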
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
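The _LazyModule pattern above defers every submodule import until an attribute is first accessed. A simplified stand-in (not transformers' actual implementation) can be built with PEP 562's module-level __getattr__; the package and class names here are hypothetical:

    # lazy_pkg/__init__.py
    import importlib

    _import_structure = {"heavy_submodule": ["HeavyClass"]}
    _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

    def __getattr__(name):
        # Import the owning submodule only on first attribute access.
        if name in _attr_to_module:
            module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
            return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")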
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# lowerCAmelCase : Optional[int] = [ # (stable-diffusion, HF Diffusers) ("""time_embed.0.weight""", """time_embedding.linear_1.weight"""), ("""time_embed.0.bias""", """time_embedding.linear_1.bias"""), ("""time_embed.2.weight""", """time_embedding.linear_2.weight"""), ("""time_embed.2.bias""", """time_embedding.linear_2.bias"""), ("""input_blocks.0.0.weight""", """conv_in.weight"""), ("""input_blocks.0.0.bias""", """conv_in.bias"""), ("""out.0.weight""", """conv_norm_out.weight"""), ("""out.0.bias""", """conv_norm_out.bias"""), ("""out.2.weight""", """conv_out.weight"""), ("""out.2.bias""", """conv_out.bias"""), ] lowerCAmelCase : Any = [ # (stable-diffusion, HF Diffusers) ("""in_layers.0""", """norm1"""), ("""in_layers.2""", """conv1"""), ("""out_layers.0""", """norm2"""), ("""out_layers.3""", """conv2"""), ("""emb_layers.1""", """time_emb_proj"""), ("""skip_connection""", """conv_shortcut"""), ] lowerCAmelCase : Dict = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks lowerCAmelCase : Any = F'''down_blocks.{i}.resnets.{j}.''' lowerCAmelCase : int = F'''input_blocks.{3*i + j + 1}.0.''' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 lowerCAmelCase : Dict = F'''down_blocks.{i}.attentions.{j}.''' lowerCAmelCase : Tuple = F'''input_blocks.{3*i + j + 1}.1.''' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks lowerCAmelCase : str = F'''up_blocks.{i}.resnets.{j}.''' lowerCAmelCase : List[str] = F'''output_blocks.{3*i + j}.0.''' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 lowerCAmelCase : Optional[Any] = F'''up_blocks.{i}.attentions.{j}.''' lowerCAmelCase : Any = F'''output_blocks.{3*i + j}.1.''' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 lowerCAmelCase : Optional[int] = F'''down_blocks.{i}.downsamplers.0.conv.''' lowerCAmelCase : List[str] = F'''input_blocks.{3*(i+1)}.0.op.''' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 lowerCAmelCase : Union[str, Any] = F'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase : Optional[Any] = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.''' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) lowerCAmelCase : Optional[Any] = """mid_block.attentions.0.""" lowerCAmelCase : Optional[int] = """middle_block.1.""" unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): lowerCAmelCase : str = F'''mid_block.resnets.{j}.''' lowerCAmelCase : int = F'''middle_block.{2*j}.''' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _A ( A ) -> str: # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. 
lowercase : List[Any] = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: lowercase : Any = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowercase : Optional[Any] = v.replace(A ,A ) lowercase : List[Any] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowercase : Any = v.replace(A ,A ) lowercase : Tuple = v lowercase : Any = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# lowerCAmelCase : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ("""nin_shortcut""", """conv_shortcut"""), ("""norm_out""", """conv_norm_out"""), ("""mid.attn_1.""", """mid_block.attentions.0."""), ] for i in range(4): # down_blocks have two resnets for j in range(2): lowerCAmelCase : List[Any] = F'''encoder.down_blocks.{i}.resnets.{j}.''' lowerCAmelCase : Optional[Any] = F'''encoder.down.{i}.block.{j}.''' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: lowerCAmelCase : Tuple = F'''down_blocks.{i}.downsamplers.0.''' lowerCAmelCase : Any = F'''down.{i}.downsample.''' vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) lowerCAmelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase : int = F'''up.{3-i}.upsample.''' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): lowerCAmelCase : int = F'''decoder.up_blocks.{i}.resnets.{j}.''' lowerCAmelCase : List[str] = F'''decoder.up.{3-i}.block.{j}.''' vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): lowerCAmelCase : Any = F'''mid_block.resnets.{i}.''' lowerCAmelCase : Optional[int] = F'''mid.block_{i+1}.''' vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) lowerCAmelCase : int = [ # (stable-diffusion, HF Diffusers) ("""norm.""", """group_norm."""), ("""q.""", """query."""), ("""k.""", """key."""), ("""v.""", """value."""), ("""proj_out.""", """proj_attn."""), ] def _A ( A ) -> Optional[Any]: # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape ,1 ,1 ) def _A ( A ) -> List[str]: lowercase : Tuple = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowercase : Union[str, Any] = v.replace(A ,A ) lowercase : Optional[Any] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowercase : str = v.replace(A ,A ) lowercase : Dict = v lowercase : int = {v: vae_state_dict[k] for k, v in mapping.items()} lowercase : Tuple = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'''mid.attn_1.{weight_name}.weight''' in k: print(F'''Reshaping {k} for SD format''' ) lowercase : List[Any] = reshape_weight_for_sd(A ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# lowerCAmelCase : Any = [ # (stable-diffusion, HF Diffusers) ("""resblocks.""", """text_model.encoder.layers."""), ("""ln_1""", """layer_norm1"""), ("""ln_2""", """layer_norm2"""), (""".c_fc.""", """.fc1."""), (""".c_proj.""", """.fc2."""), (""".attn""", """.self_attn"""), ("""ln_final.""", """transformer.text_model.final_layer_norm."""), ("""token_embedding.weight""", 
"""transformer.text_model.embeddings.token_embedding.weight"""), ("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""), ] lowerCAmelCase : int = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} lowerCAmelCase : List[Any] = re.compile("""|""".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp lowerCAmelCase : Optional[Any] = {"""q""": 0, """k""": 1, """v""": 2} def _A ( A ) -> List[Any]: lowercase : List[Any] = {} lowercase : Optional[Any] = {} lowercase : Optional[Any] = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): lowercase : int = k[: -len(".q_proj.weight" )] lowercase : List[str] = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: lowercase : Tuple = [None, None, None] lowercase : List[str] = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): lowercase : int = k[: -len(".q_proj.bias" )] lowercase : str = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: lowercase : Any = [None, None, None] lowercase : Tuple = v continue lowercase : str = textenc_pattern.sub(lambda A : protected[re.escape(m.group(0 ) )] ,A ) lowercase : Any = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) lowercase : List[str] = textenc_pattern.sub(lambda A : protected[re.escape(m.group(0 ) )] ,A ) lowercase : List[Any] = torch.cat(A ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) lowercase : Tuple = textenc_pattern.sub(lambda A : protected[re.escape(m.group(0 ) )] ,A ) lowercase : int = torch.cat(A ) return new_state_dict def _A ( A ) -> Union[str, Any]: return text_enc_dict if __name__ == "__main__": lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt.""" ) lowerCAmelCase : List[str] = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors lowerCAmelCase : List[str] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""") lowerCAmelCase : List[Any] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""") lowerCAmelCase : Optional[Any] = osp.join(args.model_path, """text_encoder""", """model.safetensors""") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): lowerCAmelCase : str = load_file(unet_path, device="""cpu""") else: lowerCAmelCase : List[str] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""") lowerCAmelCase : Any = torch.load(unet_path, map_location="""cpu""") if osp.exists(vae_path): lowerCAmelCase : Dict = load_file(vae_path, device="""cpu""") else: lowerCAmelCase : Optional[int] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""") lowerCAmelCase : Dict = torch.load(vae_path, map_location="""cpu""") if osp.exists(text_enc_path): lowerCAmelCase : List[Any] = load_file(text_enc_path, device="""cpu""") else: lowerCAmelCase : Optional[Any] = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""") lowerCAmelCase : Optional[Any] = torch.load(text_enc_path, map_location="""cpu""") # Convert the UNet model lowerCAmelCase : Any = convert_unet_state_dict(unet_state_dict) lowerCAmelCase : Union[str, Any] = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()} # Convert the VAE model lowerCAmelCase : Any = convert_vae_state_dict(vae_state_dict) lowerCAmelCase : Any = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper lowerCAmelCase : Optional[Any] = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm lowerCAmelCase : Optional[Any] = {"""transformer.""" + k: v for k, v in text_enc_dict.items()} lowerCAmelCase : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict) lowerCAmelCase : Any = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()} else: lowerCAmelCase : str = convert_text_enc_state_dict(text_enc_dict) lowerCAmelCase : Optional[Any] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint lowerCAmelCase : List[str] = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: lowerCAmelCase : Dict = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: lowerCAmelCase : Any = {"""state_dict""": state_dict} torch.save(state_dict, args.checkpoint_path)
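An illustrative launch of the converter (the script and path names are placeholders; only the flags match the argparse definitions above):

    # python convert_diffusers_to_sd.py \
    #     --model_path ./my-diffusers-model \
    #     --checkpoint_path ./model.safetensors \
    #     --half --use_safetensors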
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    """Configuration class for MegatronBERT models."""

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
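A minimal sketch of instantiating the config above with overrides (the values are illustrative; the relative imports mean this only runs inside the transformers package):

    config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    print(config.model_type)   # "megatron-bert"
    print(config.hidden_size)  # 128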
536
'''simple docstring''' import os import sys import unittest __lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __lowerCAmelCase = os.path.join("tests", "models", "bert", "test_modeling_bert.py") __lowerCAmelCase = os.path.join("tests", "models", "blip", "test_modeling_blip.py") class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def _a ( self ): """simple docstring""" a_ = get_test_to_tester_mapping(UpperCamelCase__ ) a_ = get_test_to_tester_mapping(UpperCamelCase__ ) a_ = {'BertModelTest': 'BertModelTester'} a_ = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(get_test_info.to_json(UpperCamelCase__ ) , UpperCamelCase__ ) def _a ( self ): """simple docstring""" a_ = get_model_to_test_mapping(UpperCamelCase__ ) a_ = get_model_to_test_mapping(UpperCamelCase__ ) a_ = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } a_ = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(get_test_info.to_json(UpperCamelCase__ ) , UpperCamelCase__ ) def _a ( self ): """simple docstring""" a_ = get_model_to_tester_mapping(UpperCamelCase__ ) a_ = get_model_to_tester_mapping(UpperCamelCase__ ) a_ = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } a_ = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(get_test_info.to_json(UpperCamelCase__ ) , UpperCamelCase__ )
536
1
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
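A brief usage sketch of the decorator above — the training body is a placeholder; only the decorator's calling convention (batch size injected as the first argument, halved on OOM) is taken from the code:

@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # `batch_size` is injected by the decorator; do not pass it yourself.
    print(f"Trying batch size {batch_size}")
    # ... build dataloaders and run the training step at this batch size ...

train()  # retries at 64, 32, 16, ... until the body stops raising OOM errors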
713
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy UpperCamelCase : Optional[Any] = logging.getLogger(__name__) UpperCamelCase : Any = """pytorch_model.bin""" @dataclasses.dataclass class A__ : """simple docstring""" _lowercase = dataclasses.field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , ) @dataclasses.dataclass class A__ : """simple docstring""" _lowercase = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} ) _lowercase = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'A csv or a json file containing the validation data.'} ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'The name of the task to train on.'} , ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'The list of labels for the task.'} ) @dataclasses.dataclass class A__ : """simple docstring""" _lowercase = dataclasses.field( metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} ) _lowercase = dataclasses.field( default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} ) _lowercase = dataclasses.field( default='no' , metadata={ 'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' } , ) _lowercase = dataclasses.field( default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) _lowercase = dataclasses.field( default=0.0 , metadata={ 'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.' 
} , ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , ) _lowercase = dataclasses.field( default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , ) _lowercase = dataclasses.field( default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) _lowercase = dataclasses.field( default=A__ , metadata={'help': 'Random seed for initialization.'} , ) def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]: a__ : Any = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: a__ : str = dataset.filter(lambda __a : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 a__ : Tuple = int(eval_result * len(__a ) ) print(__a ) a__ : Optional[Any] = dataset.sort("probability" , reverse=__a ) a__ : Optional[int] = dataset.select(range(__a ) ) a__ : List[str] = dataset.remove_columns(["label", "probability"] ) a__ : Union[str, Any] = dataset.rename_column("prediction" , "label" ) a__ : int = dataset.map(lambda __a : {"label": idalabel[example["label"]]} ) a__ : Optional[int] = dataset.shuffle(seed=args.seed ) a__ : str = os.path.join(__a , f'''train_pseudo.{args.data_file_extension}''' ) if args.data_file_extension == "csv": dataset.to_csv(__a , index=__a ) else: dataset.to_json(__a ) def UpperCamelCase_ ( __a , __a , __a , __a , **__a ) -> Dict: a__ : List[str] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() a__ : int = STModelArguments(model_name_or_path=__a ) a__ : Optional[int] = STDataArguments(train_file=__a , infer_file=__a ) a__ : List[Any] = STTrainingArguments(output_dir=__a ) a__ : Union[str, Any] = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(__a ).items(): setattr(__a , __a , __a ) for key, value in kwargs.items(): if hasattr(__a , __a ): setattr(__a , __a , __a ) # Sanity checks a__ : List[Any] = {} a__ : Optional[Any] = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None a__ : Union[str, Any] = args.train_file a__ : List[str] = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None a__ : Tuple = args.eval_file for key in data_files: a__ : Optional[Any] = data_files[key].split("." 
)[-1] assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.''' if args.data_file_extension is None: a__ : List[Any] = extension else: assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.''' assert ( args.eval_metric in datasets.list_metrics() ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.''' # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("Creating the initial data directory for self-training..." ) a__ : Any = f'''{args.output_dir}/self-train_iter-{{}}'''.format a__ : List[str] = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=__a ) os.makedirs(__a , exist_ok=__a ) accelerator.wait_for_everyone() a__ : Optional[int] = None a__ : str = None a__ : List[Any] = 0 a__ : List[Any] = False # Show the progress bar a__ : Any = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): a__ : Optional[int] = data_dir_format(__a ) assert os.path.exists(__a ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 a__ : Union[str, Any] = os.path.join(__a , "stage-1" ) a__ : str = { "accelerator": accelerator, "model_name_or_path": args.model_name_or_path, "cache_dir": args.cache_dir, "do_train": True, "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], "do_eval": True if args.eval_file is not None else False, "eval_file": data_files["eval"], "do_predict": True, "infer_file": data_files["infer"], "task_name": args.task_name, "label_list": args.label_list, "output_dir": current_output_dir, "eval_metric": args.eval_metric, "evaluation_strategy": args.evaluation_strategy, "early_stopping_patience": args.early_stopping_patience, "early_stopping_threshold": args.early_stopping_threshold, "seed": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(__a , __a ): arguments_dict.update({key: value} ) a__ : Tuple = os.path.join(__a , "best-checkpoint" , __a ) if os.path.exists(__a ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __a , __a , ) else: logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __a ) finetune(**__a ) accelerator.wait_for_everyone() assert os.path.exists(__a ) logger.info("Self-training job completed: iteration: %d, stage: 1." , __a ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data a__ : Any = os.path.join(__a , "best-checkpoint" ) a__ : Optional[int] = os.path.join(__a , "stage-2" ) # Update arguments_dict a__ : Union[str, Any] = model_path a__ : Union[str, Any] = data_files["train"] a__ : Optional[Any] = current_output_dir a__ : Optional[int] = os.path.join(__a , "best-checkpoint" , __a ) if os.path.exists(__a ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __a , __a , ) else: logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __a ) finetune(**__a ) accelerator.wait_for_everyone() assert os.path.exists(__a ) logger.info("Self-training job completed: iteration: %d, stage: 2." 
, __a ) a__ : Dict = iteration a__ : List[str] = data_dir_format(iteration + 1 ) a__ : Union[str, Any] = AutoConfig.from_pretrained(os.path.join(__a , "best-checkpoint" ) ) a__ : str = config.idalabel a__ : Union[str, Any] = os.path.join(__a , "eval_results_best-checkpoint.json" ) a__ : Dict = os.path.join(__a , "test_results_best-checkpoint.json" ) assert os.path.exists(__a ) with open(__a , "r" ) as f: a__ : Optional[int] = float(json.load(__a )[args.eval_metric] ) a__ : Union[str, Any] = os.path.join(__a , "infer_output_best-checkpoint.csv" ) assert os.path.exists(__a ) # Loading the dataset from local csv or json files. a__ : List[Any] = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"] a__ : Dict = load_dataset("csv" , data_files={"data": infer_output_file} )["data"] if accelerator.is_main_process: os.makedirs(__a , exist_ok=__a ) shutil.copy(__a , os.path.join(__a , f'''eval_results_iter-{iteration}.json''' ) ) if os.path.exists(__a ): shutil.copy(__a , os.path.join(__a , f'''test_results_iter-{iteration}.json''' ) ) create_pseudo_labeled_data(__a , __a , __a , __a , __a , __a ) accelerator.wait_for_everyone() a__ : Optional[int] = os.path.join(__a , f'''train_pseudo.{args.data_file_extension}''' ) if args.evaluation_strategy != IntervalStrategy.NO.value: a__ : str = eval_result if best_iteration is None: a__ : Union[str, Any] = new_iteration a__ : Dict = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: a__ : List[str] = new_iteration a__ : List[Any] = new_eval_result a__ : Dict = 0 else: if new_eval_result == best_eval_result: a__ : Optional[int] = new_iteration a__ : Any = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: a__ : Dict = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("Best iteration: %d" , __a ) logger.info("Best evaluation result: %s = %f" , args.eval_metric , __a ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__a , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(__a , "eval_results_best-iteration.json" ) , ) else: # Assume that the last iteration is the best logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 ) logger.info("Best evaluation result: %s = %f" , args.eval_metric , __a ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__a , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__a , "eval_results_best-iteration.json" ) , )
151
0
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
88
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self :List[str] ): '''simple docstring''' for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModel.from_pretrained(_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModel.from_pretrained(_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase ( self :List[str] ): '''simple docstring''' for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModelForPreTraining.from_pretrained(_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModelForPreTraining.from_pretrained(_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase ( self :Tuple ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModelForCausalLM.from_pretrained(_lowercase , from_pt=_lowercase ) lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained( _lowercase , output_loading_info=_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModelForCausalLM.from_pretrained(_lowercase , from_tf=_lowercase ) lowercase__ , lowercase__ = AutoModelForCausalLM.from_pretrained( _lowercase , output_loading_info=_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase ( self :Optional[Any] ): '''simple docstring''' 
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase ( self :List[str] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModelForMaskedLM.from_pretrained(_lowercase , from_pt=_lowercase ) lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained( _lowercase , output_loading_info=_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModelForMaskedLM.from_pretrained(_lowercase , from_tf=_lowercase ) lowercase__ , lowercase__ = AutoModelForMaskedLM.from_pretrained( _lowercase , output_loading_info=_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase ( self :Tuple ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_pt=_lowercase ) lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained( _lowercase , output_loading_info=_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(_lowercase , from_tf=_lowercase ) lowercase__ , lowercase__ = AutoModelForSeqaSeqLM.from_pretrained( _lowercase , output_loading_info=_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase ( self :str ): '''simple docstring''' for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModelForSequenceClassification.from_pretrained(_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) @slow def UpperCAmelCase ( self :str ): '''simple docstring''' for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(_lowercase , from_pt=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) lowercase__ = AutoModelForQuestionAnswering.from_pretrained(_lowercase , from_tf=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = 
TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 ) lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 ) def UpperCAmelCase ( self :List[str] ): '''simple docstring''' lowercase__ = TFAutoModelWithLMHead.from_pretrained(_lowercase , from_pt=_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 ) lowercase__ = AutoModelWithLMHead.from_pretrained(_lowercase , from_tf=_lowercase ) self.assertIsInstance(_lowercase , _lowercase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_lowercase ) , 1_44_10 )
655
0
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
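For readers who just want to sample from the pipeline exercised above, a minimal standalone sketch — the model id is taken from the slow test, while the step count and output filename are arbitrary choices for illustration:

import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

# Build the pipeline from the same pretrained UNet the slow test uses.
unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# Default output type is PIL, so .images[0] can be saved directly.
image = pipe(generator=torch.manual_seed(0), num_inference_steps=50).images[0]
image.save("pndm_sample.png")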
712
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : Tuple = { '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''', } class lowerCAmelCase__ ( __magic_name__ ): '''simple docstring''' lowercase_ = """bloom""" lowercase_ = ["""past_key_values"""] lowercase_ = { """num_hidden_layers""": """n_layer""", """num_attention_heads""": """n_head""", } def __init__( self , lowercase__=2_5_0_8_8_0 , lowercase__=6_4 , lowercase__=2 , lowercase__=8 , lowercase__=1E-5 , lowercase__=0.02 , lowercase__=True , lowercase__=1 , lowercase__=2 , lowercase__=False , lowercase__=0.0 , lowercase__=0.0 , lowercase__=1 , lowercase__=False , **lowercase__ , ): '''simple docstring''' __A =vocab_size # Backward compatibility with n_embed kwarg __A =kwargs.pop('''n_embed''' , lowercase__ ) __A =hidden_size if n_embed is None else n_embed __A =n_layer __A =n_head __A =layer_norm_epsilon __A =initializer_range __A =use_cache __A =pretraining_tp __A =apply_residual_connection_post_layernorm __A =hidden_dropout __A =attention_dropout __A =bos_token_id __A =eos_token_id __A =slow_but_exact super().__init__(bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ ) class lowerCAmelCase__ ( __magic_name__ ): '''simple docstring''' lowercase_ = version.parse("""1.12""" ) def __init__( self , lowercase__ , lowercase__ = "default" , lowercase__ = None , lowercase__ = False , ): '''simple docstring''' super().__init__(lowercase__ , task=lowercase__ , patching_specs=lowercase__ , use_past=lowercase__ ) if not getattr(self._config , '''pad_token_id''' , lowercase__ ): # TODO: how to do that better? __A =0 @property def __UpperCamelCase ( self ): '''simple docstring''' __A =OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(lowercase__ , direction='''inputs''' , inverted_values_shape=lowercase__ ) __A ={0: '''batch''', 1: '''past_sequence + sequence'''} else: __A ={0: '''batch''', 1: '''sequence'''} return common_inputs @property def __UpperCamelCase ( self ): '''simple docstring''' return self._config.n_layer @property def __UpperCamelCase ( self ): '''simple docstring''' return self._config.n_head @property def __UpperCamelCase ( self ): '''simple docstring''' return 1E-3 def __UpperCamelCase ( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ): '''simple docstring''' __A =super(lowercase__ , self ).generate_dummy_inputs( lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ ) # We need to order the input in the way they appears in the forward() __A =OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch __A , __A =common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __A =seqlen + 2 __A =self._config.hidden_size // self.num_attention_heads __A =( batch * self.num_attention_heads, head_dim, past_key_values_length, ) __A =( batch * self.num_attention_heads, past_key_values_length, head_dim, ) __A =[ (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(self.num_layers ) ] __A =common_inputs['''attention_mask'''] if self.use_past: __A =ordered_inputs['''attention_mask'''].dtype __A =torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__ )] , dim=1 ) return ordered_inputs @property def __UpperCamelCase ( self ): '''simple docstring''' return 1_3
516
0
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def a ( a ) ->bool: '''simple docstring''' SCREAMING_SNAKE_CASE = int(number**0.5 ) return number == sq * sq def a ( a , a , a , a , a , a ) ->tuple[int, int]: '''simple docstring''' SCREAMING_SNAKE_CASE = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den SCREAMING_SNAKE_CASE = x_den * y_den * z_den SCREAMING_SNAKE_CASE = gcd(a , a ) top //= hcf bottom //= hcf return top, bottom def a ( a = 35 ) ->int: '''simple docstring''' SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = Fraction(0 ) SCREAMING_SNAKE_CASE = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 SCREAMING_SNAKE_CASE = x_num * y_den + x_den * y_num SCREAMING_SNAKE_CASE = x_den * y_den SCREAMING_SNAKE_CASE = gcd(a , a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE = add_three( a , a , a , a , a , a ) unique_s.add(a ) # n=2 SCREAMING_SNAKE_CASE = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) SCREAMING_SNAKE_CASE = x_den * x_den * y_den * y_den if is_sq(a ) and is_sq(a ): SCREAMING_SNAKE_CASE = int(sqrt(a ) ) SCREAMING_SNAKE_CASE = int(sqrt(a ) ) SCREAMING_SNAKE_CASE = gcd(a , a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE = add_three( a , a , a , a , a , a ) unique_s.add(a ) # n=-1 SCREAMING_SNAKE_CASE = x_num * y_num SCREAMING_SNAKE_CASE = x_den * y_num + x_num * y_den SCREAMING_SNAKE_CASE = gcd(a , a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE = add_three( a , a , a , a , a , a ) unique_s.add(a ) # n=2 SCREAMING_SNAKE_CASE = x_num * x_num * y_num * y_num SCREAMING_SNAKE_CASE = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(a ) and is_sq(a ): SCREAMING_SNAKE_CASE = int(sqrt(a ) ) SCREAMING_SNAKE_CASE = int(sqrt(a ) ) SCREAMING_SNAKE_CASE = gcd(a , a ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE = add_three( a , a , a , a , a , a ) unique_s.add(a ) for num, den in unique_s: total += Fraction(a , a ) return total.denominator + total.numerator if __name__ == "__main__": print(F'''{solution() = }''')
201
from PIL import Image


def mean_threshold(image: Image) -> Image:
    # Binarize a grayscale PIL image around its mean pixel value.
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
201
1
def circle_sort(collection):
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
36
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
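As a quick sanity check of the helper above (inputs chosen arbitrarily):

# Odd combined length: the middle element of the merged, sorted arrays.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
# Even combined length: the mean of the two middle elements.
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5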
36
1
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class __lowercase ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : int = 32 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , UpperCamelCase_ : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Optional[int]=30 , UpperCamelCase_ : Tuple=400 , UpperCamelCase_ : List[Any]=3 , ): """simple docstring""" __A = parent __A = do_resize __A = size if size is not None else {"""shortest_edge""": 288} __A = size_divisor __A = do_rescale __A = rescale_factor __A = do_normalize __A = do_center_crop __A = image_mean __A = image_std __A = do_pad __A = batch_size __A = num_channels __A = min_resolution __A = max_resolution def lowerCAmelCase_ ( self : str ): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any]=False ): """simple docstring""" if not batched: __A = self.size["""shortest_edge"""] __A = image_inputs[0] if isinstance(__lowerCAmelCase , Image.Image ): __A , __A = image.size else: __A , __A = image.shape[1], image.shape[2] __A = size / min(__lowerCAmelCase , __lowerCAmelCase ) if h < w: __A , __A = size, scale * w else: __A , __A = scale * h, size __A = int((1_333 / 800) * size ) if max(__lowerCAmelCase , __lowerCAmelCase ) > max_size: __A = max_size / max(__lowerCAmelCase , __lowerCAmelCase ) __A = newh * scale __A = neww * scale __A , __A = int(newh + 0.5 ), int(neww + 0.5 ) __A , __A = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __A = [] for image in image_inputs: __A , __A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __A = max(__lowerCAmelCase , key=lambda UpperCamelCase_ : item[0] )[0] __A = max(__lowerCAmelCase , key=lambda UpperCamelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowercase ( __a , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = BridgeTowerImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : List[str] ): """simple docstring""" __A = BridgeTowerImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Any ): """simple docstring""" __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) ) 
self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size_divisor""" ) ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" pass def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input __A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __A , __A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __A = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values __A , __A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __A , __A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __A = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values __A , __A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase_ ( self : str ): """simple docstring""" __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __A , __A = self.image_processor_tester.get_expected_values(__lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __A = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values __A , __A = self.image_processor_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
637
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case = get_logger(__name__) snake_case = R''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class SCREAMING_SNAKE_CASE : """simple docstring""" @add_start_docstrings(__lowerCAmelCase ) def __call__( self : List[Any] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class SCREAMING_SNAKE_CASE : """simple docstring""" @add_start_docstrings(__lowerCAmelCase ) def __call__( self : Any , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray ): """simple docstring""" raise NotImplementedError( F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" @add_start_docstrings(__lowerCAmelCase ) def __call__( self : Optional[Any] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int , **__lowerCAmelCase : Tuple ): """simple docstring""" for processor in self: _lowerCAmelCase = inspect.signature(processor.__call__ ).parameters if len(__lowerCAmelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F"Make sure that all the required parameters: {list(function_args.keys() )} for " F"{processor.__class__} are passed to the logits processor." 
) _lowerCAmelCase = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) else: _lowerCAmelCase = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : Any , __lowerCAmelCase : float ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not (temperature > 0): raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" ) _lowerCAmelCase = temperature def __call__( self : Optional[Any] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = scores / self.temperature return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : Union[str, Any] , __lowerCAmelCase : float , __lowerCAmelCase : float = -float('Inf' ) , __lowerCAmelCase : int = 1 ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (min_tokens_to_keep < 1): raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" ) _lowerCAmelCase = top_p _lowerCAmelCase = filter_value _lowerCAmelCase = min_tokens_to_keep def __call__( self : int , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = lax.top_k(__lowerCAmelCase , scores.shape[-1] ) _lowerCAmelCase = jnp.full_like(__lowerCAmelCase , self.filter_value ) _lowerCAmelCase = jax.nn.softmax(__lowerCAmelCase , axis=-1 ).cumsum(axis=-1 ) _lowerCAmelCase = cumulative_probs < self.top_p # include the token that is higher than top_p as well _lowerCAmelCase = jnp.roll(__lowerCAmelCase , 1 ) score_mask |= score_mask.at[:, 0].set(__lowerCAmelCase ) # min tokens to keep _lowerCAmelCase = score_mask.at[:, : self.min_tokens_to_keep].set(__lowerCAmelCase ) _lowerCAmelCase = jnp.where(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCAmelCase = jax.lax.sort_key_val(__lowerCAmelCase , __lowerCAmelCase )[-1] return next_scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = -float('Inf' ) , __lowerCAmelCase : int = 1 ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or top_k <= 0: raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" ) _lowerCAmelCase = max(__lowerCAmelCase , __lowerCAmelCase ) _lowerCAmelCase = filter_value def __call__( self : Tuple , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = scores.shape _lowerCAmelCase = jnp.full(batch_size * vocab_size , self.filter_value ) _lowerCAmelCase = min(self.top_k , scores.shape[-1] ) # Safety check _lowerCAmelCase , _lowerCAmelCase = lax.top_k(__lowerCAmelCase , __lowerCAmelCase ) _lowerCAmelCase = jnp.broadcast_to((jnp.arange(__lowerCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() _lowerCAmelCase = topk_scores.flatten() _lowerCAmelCase = topk_indices.flatten() + shift _lowerCAmelCase = next_scores_flat.at[topk_indices_flat].set(__lowerCAmelCase ) _lowerCAmelCase = next_scores_flat.reshape(__lowerCAmelCase , 
__lowerCAmelCase ) return next_scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : List[Any] , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = bos_token_id def __call__( self : List[str] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = jnp.full(scores.shape , -float('inf' ) ) _lowerCAmelCase = 1 - jnp.bool_(cur_len - 1 ) _lowerCAmelCase = jnp.where(__lowerCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __lowerCAmelCase ) return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = max_length _lowerCAmelCase = eos_token_id def __call__( self : Optional[int] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = jnp.full(scores.shape , -float('inf' ) ) _lowerCAmelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 ) _lowerCAmelCase = jnp.where(__lowerCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __lowerCAmelCase ) return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ): """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or min_length < 0: raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or eos_token_id < 0: raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}" ) _lowerCAmelCase = min_length _lowerCAmelCase = eos_token_id def __call__( self : str , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) _lowerCAmelCase = jnp.where(__lowerCAmelCase , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , __lowerCAmelCase ) return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCAmelCase = list(__lowerCAmelCase ) _lowerCAmelCase = begin_index def __call__( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = 1 - jnp.bool_(cur_len - self.begin_index ) _lowerCAmelCase = jnp.where(__lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , __lowerCAmelCase ) return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : Optional[Any] , __lowerCAmelCase : list ): """simple docstring""" _lowerCAmelCase = list(__lowerCAmelCase ) def __call__( self : List[Any] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" _lowerCAmelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : Tuple , __lowerCAmelCase : str ): """simple docstring""" _lowerCAmelCase = dict(__lowerCAmelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. 
# Indexes without forced tokens will have a negative value. _lowerCAmelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: _lowerCAmelCase = force_token_array.at[index].set(__lowerCAmelCase ) _lowerCAmelCase = jnp.intaa(__lowerCAmelCase ) def __call__( self : str , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): """simple docstring""" def _force_token(__lowerCAmelCase : int ): _lowerCAmelCase = scores.shape[0] _lowerCAmelCase = self.force_token_array[generation_idx] _lowerCAmelCase = jnp.ones_like(__lowerCAmelCase , dtype=scores.dtype ) * -float('inf' ) _lowerCAmelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) _lowerCAmelCase = lax.dynamic_update_slice(__lowerCAmelCase , __lowerCAmelCase , (0, current_token) ) return new_scores _lowerCAmelCase = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__lowerCAmelCase ) , lambda: scores , ) , ) return scores class SCREAMING_SNAKE_CASE ( __a ): """simple docstring""" def __init__( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ): """simple docstring""" _lowerCAmelCase = generate_config.eos_token_id _lowerCAmelCase = generate_config.no_timestamps_token_id _lowerCAmelCase = generate_config.no_timestamps_token_id + 1 _lowerCAmelCase = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__lowerCAmelCase , 'max_initial_timestamp_index' ): _lowerCAmelCase = generate_config.max_initial_timestamp_index else: _lowerCAmelCase = model_config.vocab_size if self.max_initial_timestamp_index is None: _lowerCAmelCase = model_config.vocab_size def __call__( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCAmelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(__lowerCAmelCase : Dict , __lowerCAmelCase : Dict ): _lowerCAmelCase = jnp.where((cur_len - self.begin_index) >= 1 , __lowerCAmelCase , __lowerCAmelCase ) _lowerCAmelCase = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __lowerCAmelCase , ) _lowerCAmelCase = jnp.where((cur_len - self.begin_index) < 2 , __lowerCAmelCase , __lowerCAmelCase ) _lowerCAmelCase = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __lowerCAmelCase , __lowerCAmelCase , ) return jnp.where( __lowerCAmelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , __lowerCAmelCase , ) _lowerCAmelCase = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) _lowerCAmelCase = jnp.where(cur_len == self.begin_index , __lowerCAmelCase , __lowerCAmelCase ) _lowerCAmelCase = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __lowerCAmelCase , ) _lowerCAmelCase = self.timestamp_begin + self.max_initial_timestamp_index _lowerCAmelCase = jnp.where( __lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , __lowerCAmelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp _lowerCAmelCase = jax.nn.log_softmax(__lowerCAmelCase , axis=-1 ) def handle_cumulative_probs(__lowerCAmelCase : Any , 
__lowerCAmelCase : str ): _lowerCAmelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) _lowerCAmelCase = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , __lowerCAmelCase , ) _lowerCAmelCase = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) return scores
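As a standalone illustration of the force-token branch above, here is a minimal JAX sketch (the function names are illustrative assumptions, not the transformers API): every logit is pushed to -inf except the forced position, and `lax.cond` keeps the choice jit-traceable.

```python
import jax.numpy as jnp
from jax import lax


def force_token(scores: jnp.ndarray, token_id: int) -> jnp.ndarray:
    # All logits go to -inf except the forced position, which gets 0,
    # so greedy search and sampling can only pick `token_id`.
    new_scores = jnp.full_like(scores, -jnp.inf)
    return new_scores.at[:, token_id].set(0.0)


def maybe_force(scores: jnp.ndarray, cur_len: int, force_at: int, token_id: int) -> jnp.ndarray:
    # Both branches of lax.cond must return arrays of the same shape and dtype.
    return lax.cond(
        cur_len == force_at,
        lambda: force_token(scores, token_id),
        lambda: scores,
    )


print(maybe_force(jnp.zeros((1, 8)), cur_len=0, force_at=0, token_id=3))
```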
309
0
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _a : List[str] = '\\n\n' _a : Any = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' _a : List[Any] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): """simple docstring""" def lowerCamelCase_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1_6 , UpperCAmelCase = True , UpperCAmelCase=None ): if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": __lowerCamelCase = """cuda""" else: __lowerCamelCase = """cuda""" if torch.cuda.is_available() else """cpu""" __lowerCamelCase = AutoModelForCausalLM.from_pretrained(UpperCAmelCase ) __lowerCamelCase = model.to(UpperCAmelCase ) __lowerCamelCase = AutoTokenizer.from_pretrained(UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: __lowerCamelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" __lowerCamelCase = model.config.max_length - 1 else: __lowerCamelCase = model.config.max_length __lowerCamelCase = tokenizer( UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=UpperCAmelCase , ).to(UpperCAmelCase ) __lowerCamelCase = encodings["""input_ids"""] __lowerCamelCase = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." __lowerCamelCase = [] __lowerCamelCase = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(UpperCAmelCase ) , UpperCAmelCase ) ): __lowerCamelCase = min(start_index + batch_size , len(UpperCAmelCase ) ) __lowerCamelCase = encoded_texts[start_index:end_index] __lowerCamelCase = attn_masks[start_index:end_index] if add_start_token: __lowerCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCAmelCase ) __lowerCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) __lowerCamelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCAmelCase ), attn_mask] , dim=1 ) __lowerCamelCase = encoded_batch with torch.no_grad(): __lowerCamelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase ).logits __lowerCamelCase = out_logits[..., :-1, :].contiguous() __lowerCamelCase = labels[..., 1:].contiguous() __lowerCamelCase = attn_mask[..., 1:].contiguous() __lowerCamelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCAmelCase )}
713
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase_ ( __UpperCamelCase ): """simple docstring""" A = ['''image_processor''', '''tokenizer'''] A = '''ViltImageProcessor''' A = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ): __lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCAmelCase , ) __lowerCamelCase = kwargs.pop("""feature_extractor""" ) __lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCAmelCase , UpperCAmelCase ) __lowerCamelCase = self.image_processor def __call__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ): __lowerCamelCase = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) # add pixel_values + pixel_mask __lowerCamelCase = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase ) encoding.update(UpperCAmelCase ) return encoding def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def lowerCamelCase_ ( self , *UpperCAmelCase , **UpperCAmelCase ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def lowerCamelCase_ ( self ): __lowerCamelCase = self.tokenizer.model_input_names __lowerCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCamelCase_ ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase , ) return self.image_processor_class @property def lowerCamelCase_ ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase , ) return self.image_processor
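A hedged usage sketch of the ViLT processor above; the checkpoint is the public VQA-finetuned ViLT, and the listed output keys are what the combined tokenizer + image-processor call is expected to return:

```python
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.new("RGB", (384, 384))  # stand-in for a real photo
inputs = processor(image, "How many cats are there?", return_tensors="pt")
# expect input_ids / attention_mask from the tokenizer plus
# pixel_values / pixel_mask from the image processor
print(sorted(inputs.keys()))
```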
571
0
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        # mark the end of a complete word
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
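Two quick checks of the word-break function above, using the classic segmentation examples:

```python
print(word_break("applepenapple", ["apple", "pen"]))                   # True
print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False
```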
26
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __UpperCamelCase = "bart" __UpperCamelCase = True @st.cache(allow_output_mutation=_lowerCamelCase ) def _a ( ) -> Union[str, Any]: """simple docstring""" if LOAD_DENSE_INDEX: __snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" ) __snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" ) __snake_case : List[Any] = qar_model.eval() else: __snake_case , __snake_case : Optional[Any] = (None, None) if MODEL_TYPE == "bart": __snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" ) __snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" ) __snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" ) sas_model.load_state_dict(save_dict["""model"""] ) __snake_case : int = sas_model.eval() else: __snake_case , __snake_case : Dict = make_qa_sas_model( model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_lowerCamelCase ) def _a ( ) -> Tuple: """simple docstring""" if LOAD_DENSE_INDEX: __snake_case : Tuple = faiss.StandardGpuResources() __snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""] __snake_case : str = np.memmap( """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , ) __snake_case : Optional[int] = faiss.IndexFlatIP(128 ) __snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase ) wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU else: __snake_case , __snake_case : Tuple = (None, None) __snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_lowerCamelCase ) def _a ( ) -> List[Any]: """simple docstring""" __snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" ) __snake_case : Dict = elia["""train_eli5"""] __snake_case : int = np.memmap( """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) ) __snake_case : Dict = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_lowerCamelCase ) return (elia_train, eli5_train_q_index) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models() __UpperCamelCase , __UpperCamelCase = load_train_data() def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int: """simple docstring""" __snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase ) __snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase ) __snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]] return nn_examples def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> 
Optional[Any]: """simple docstring""" if source == "none": __snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), []) else: if method == "dense": __snake_case , __snake_case : Dict = query_qa_dense_index( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: __snake_case , __snake_case : str = query_es_index( _lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , ) __snake_case : Optional[int] = [ (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst ] __snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _lowerCamelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None), } ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]: """simple docstring""" with torch.no_grad(): __snake_case : Union[str, Any] = qa_sas_generate( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar __UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" __UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) __UpperCamelCase = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] __UpperCamelCase = st.sidebar.checkbox("Demo options") if demo_options: __UpperCamelCase = st.sidebar.selectbox( "", action_list, index=3, ) __UpperCamelCase = action_list.index(action_st) __UpperCamelCase = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) __UpperCamelCase = show_type == "Show full text of passages" else: __UpperCamelCase = 3 __UpperCamelCase = True __UpperCamelCase = st.sidebar.checkbox("Retrieval options") if retrieval_options: __UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the 
[ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) __UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) __UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: __UpperCamelCase = "wiki40b" __UpperCamelCase = "dense" __UpperCamelCase = "beam" __UpperCamelCase = 2 __UpperCamelCase = 64 __UpperCamelCase = 256 __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = st.sidebar.checkbox("Generation options") if generate_options: __UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) __UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) __UpperCamelCase = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) __UpperCamelCase = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": __UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __UpperCamelCase = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __UpperCamelCase = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __UpperCamelCase = None # start main text __UpperCamelCase = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] __UpperCamelCase = st.selectbox( "What would you like to ask? 
---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": __UpperCamelCase = st.text_input("Enter your question here:", "") else: __UpperCamelCase = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": __UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10) __UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10) __UpperCamelCase = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __UpperCamelCase = support_list[:10] __UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: __UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __UpperCamelCase , __UpperCamelCase = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): __UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) __UpperCamelCase = res[1].strip() if sec_titles == "": __UpperCamelCase = "[{}]({})".format(res[0], wiki_url) else: __UpperCamelCase = sec_titles.split(" & ") __UpperCamelCase = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: __UpperCamelCase = find_nearest_training(question) __UpperCamelCase = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) __UpperCamelCase = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) __UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
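The dense-retrieval step the app builds around faiss can be shown in isolation; the dimensions and data below are synthetic stand-ins for the 128-d passage embeddings used above:

```python
import faiss
import numpy as np

# Synthetic passage embeddings; the app memory-maps real ones from disk.
passages = np.random.rand(1000, 128).astype("float32")
index = faiss.IndexFlatIP(128)  # max-inner-product search, as above
index.add(passages)

query = np.random.rand(1, 128).astype("float32")
scores, ids = index.search(query, 10)  # top-10 inner-product matches
print(ids[0])
```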
26
1
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = "▁" _a = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} _a = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } _a = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } _a = { "ernie-m-base": 514, "ernie-m-large": 514, } _a = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = ["""input_ids"""] lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = RESOURCE_FILES_NAMES def __init__( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase="utf8" , __lowerCAmelCase="[UNK]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[PAD]" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , vocab_file=__lowerCAmelCase , encoding=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) lowerCamelCase__ = do_lower_case lowerCamelCase__ = sentencepiece_model_ckpt lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCamelCase__ = self.load_vocab(filepath=__lowerCAmelCase ) else: lowerCamelCase__ = {self.sp_model.id_to_piece(__lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )} lowerCamelCase__ = {v: k for k, v in self.vocab.items()} def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if text is None: return None lowerCamelCase__ = self.tokenize(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = '''''', [] for i, ch in enumerate(__lowerCAmelCase ): if ch in self.SP_CHAR_MAPPING: lowerCamelCase__ = self.SP_CHAR_MAPPING.get(__lowerCAmelCase ) else: lowerCamelCase__ = unicodedata.normalize('''NFKC''' , __lowerCAmelCase ) if self.is_whitespace(__lowerCAmelCase ): continue normalized_text += ch char_mapping.extend([i] * len(__lowerCAmelCase ) ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = normalized_text, [], 0 if self.do_lower_case: lowerCamelCase__ = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCamelCase__ = token[1:] lowerCamelCase__ = text[offset:].index(__lowerCAmelCase ) + offset lowerCamelCase__ = start + len(__lowerCAmelCase ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) 
lowerCamelCase__ = end return token_mapping @property def __lowerCamelCase ( self ): '''simple docstring''' return len(self.vocab ) def __lowerCamelCase ( self ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): '''simple docstring''' lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' return "".join((self.SP_CHAR_MAPPING.get(__lowerCAmelCase , __lowerCAmelCase ) for c in text) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=6_4 , __lowerCAmelCase=0.1 ): '''simple docstring''' if self.sp_model_kwargs.get('''enable_sampling''' ) is True: lowerCamelCase__ = True if self.sp_model_kwargs.get('''alpha''' ) is not None: lowerCamelCase__ = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: lowerCamelCase__ = self.sp_model_kwargs.get('''nbest_size''' ) if not enable_sampling: lowerCamelCase__ = self.sp_model.EncodeAsPieces(__lowerCAmelCase ) else: lowerCamelCase__ = self.sp_model.SampleEncodeAsPieces(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] for pi, piece in enumerate(__lowerCAmelCase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__lowerCAmelCase ) and pi != 0: new_pieces.append(__lowerCAmelCase ) continue else: continue lowerCamelCase__ = 0 for i, chunk in enumerate(__lowerCAmelCase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__lowerCAmelCase ) or self.is_punct(__lowerCAmelCase ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__lowerCAmelCase ) lowerCamelCase__ = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCamelCase__ = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCamelCase__ = i if len(__lowerCAmelCase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.convert_ids_to_tokens(__lowerCAmelCase ) lowerCamelCase__ = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' return self.vocab.get(__lowerCAmelCase , self.vocab.get(self.unk_token ) ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' return self.reverse_vocab.get(__lowerCAmelCase , self.unk_token ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep 
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None ): '''simple docstring''' if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1] def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): '''simple docstring''' if token_ids_a is None: # [CLS] X [SEP] return (len(__lowerCAmelCase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__lowerCAmelCase ) + 1) + [1] * (len(__lowerCAmelCase ) + 3) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if "\u4e00" <= char <= "\u9fff": return True return False def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if char in ",;:.?!~,;:。?!《》【】": return True return False def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__lowerCAmelCase ) == 1: lowerCamelCase__ = unicodedata.category(__lowerCAmelCase ) if cat == "Zs": return True return False def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = {} with io.open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(__lowerCAmelCase ): lowerCamelCase__ = line.rstrip('''\n''' ) lowerCamelCase__ = int(__lowerCAmelCase ) return token_to_idx def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): '''simple docstring''' lowerCamelCase__ = 0 if os.path.isdir(__lowerCAmelCase ): lowerCamelCase__ = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: lowerCamelCase__ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ''' Please check that the vocabulary is not corrupted!''' ) lowerCamelCase__ = token_index writer.write(token + '''\n''' ) index += 1 lowerCamelCase__ = os.path.join(__lowerCAmelCase , '''sentencepiece.bpe.model''' ) with open(__lowerCAmelCase , '''wb''' ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (vocab_file,)
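The special-token layout produced by the two methods above can be checked with placeholder ids (these are not real vocabulary entries): a pair is wrapped as [CLS] A [SEP] [SEP] B [SEP], with token type 0 over "[CLS] A" and 1 over the rest.

```python
cls_id, sep_id = 1, 2   # placeholder ids, not real vocabulary entries
a, b = [10, 11], [20]   # two already-tokenized segments

pair = [cls_id] + a + [sep_id, sep_id] + b + [sep_id]
type_ids = [0] * (len(a) + 1) + [1] * (len(b) + 3)

print(pair)      # [1, 10, 11, 2, 2, 20, 2]
print(type_ids)  # [0, 0, 0, 1, 1, 1, 1]
assert len(pair) == len(type_ids)
```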
705
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = 1_3 lowerCamelCase__ = 7 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = 9_9 lowerCamelCase__ = 3_2 lowerCamelCase__ = 2 lowerCamelCase__ = 4 lowerCamelCase__ = 3_7 lowerCamelCase__ = '''gelu''' lowerCamelCase__ = 0.1 lowerCamelCase__ = 0.1 lowerCamelCase__ = 5_1_2 lowerCamelCase__ = 1_6 lowerCamelCase__ = 2 lowerCamelCase__ = 0.02 lowerCamelCase__ = 3 lowerCamelCase__ = 4 lowerCamelCase__ = None def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = self.prepare_config_and_inputs() lowerCamelCase__ = True lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = True lowerCamelCase__ = TFEsmModel(config=__lowerCAmelCase ) lowerCamelCase__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''encoder_hidden_states''': encoder_hidden_states, '''encoder_attention_mask''': encoder_attention_mask, } lowerCamelCase__ = model(__lowerCAmelCase ) lowerCamelCase__ = [input_ids, input_mask] lowerCamelCase__ = model(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ) # Also check the case where encoder outputs are not passed lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM(config=__lowerCAmelCase ) lowerCamelCase__ = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = TFEsmForTokenClassification(config=__lowerCAmelCase ) lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase_ = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFEsmModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__lowerCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase__ = model.get_bias() assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) for k, v in name.items(): assert isinstance(__lowerCAmelCase , tf.Variable ) else: lowerCamelCase__ = model.get_output_embeddings() assert x is None lowerCamelCase__ = model.get_bias() assert name is None @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] lowerCamelCase__ = [1, 6, 3_3] self.assertEqual(list(output.numpy().shape ) , __lowerCAmelCase ) # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowerCamelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) lowerCamelCase__ = model(__lowerCAmelCase )[0] # compare the actual values for a slice. lowerCamelCase__ = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
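The same forward-pass pattern as the integration tests above can be run offline with a tiny random-weight config; the sizes here are arbitrary assumptions, not the facebook/esm2_t6_8M_UR50D hyperparameters:

```python
import tensorflow as tf
from transformers import EsmConfig, TFEsmForMaskedLM

config = EsmConfig(
    vocab_size=33,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    pad_token_id=1,
)
model = TFEsmForMaskedLM(config)  # random weights, no download needed
logits = model(tf.constant([[0, 1, 2, 3, 4, 5]])).logits
print(logits.shape)  # (1, 6, 33)
```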
29
0
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the exponential linear unit: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
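A quick numeric check of the activation above with the common default alpha = 1.0:

```python
x = np.array([-2.0, -0.5, 0.0, 1.5])
print(exponential_linear_unit(x, alpha=1.0))
# approximately [-0.8647 -0.3935  0.      1.5   ]
```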
537
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowercase (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCAmelCase = ["""image_processor""", """tokenizer"""] _UpperCAmelCase = """Pix2StructImageProcessor""" _UpperCAmelCase = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = False super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 2_0_4_8 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): """simple docstring""" if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer SCREAMING_SNAKE_CASE_ : str = self.tokenizer( text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor( lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , **lowerCAmelCase__ ) else: # add pixel_values and bbox SCREAMING_SNAKE_CASE_ : Any = self.image_processor( lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ , **lowerCAmelCase__ ) if text is not None and not self.image_processor.is_vqa: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer( text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , ) if "attention_mask" in text_encoding: SCREAMING_SNAKE_CASE_ : Any = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: SCREAMING_SNAKE_CASE_ : List[str] = text_encoding.pop('input_ids' ) else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if text_encoding is not None: encoding_image_processor.update(lowerCAmelCase__ ) return encoding_image_processor def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ): 
"""simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @property def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
101
0
"""simple docstring""" import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCamelCase__ = '''\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } ''' UpperCamelCase__ = '''\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. ''' UpperCamelCase__ = ''' Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for \'cvit-mkb-clsr\' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "precision": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'precision@10\': 1.0} ''' def UpperCAmelCase ( snake_case : str , snake_case : Union[str, Any] ): return float((preds == labels).mean() ) def UpperCAmelCase ( snake_case : int , snake_case : str ): _lowerCAmelCase:int = simple_accuracy(snake_case , snake_case ) _lowerCAmelCase:Optional[int] = float(fa_score(y_true=snake_case , y_pred=snake_case ) ) return { "accuracy": acc, "f1": fa, } def UpperCAmelCase ( snake_case : int , snake_case : Tuple ): _lowerCAmelCase:Union[str, Any] = np.array(snake_case ) _lowerCAmelCase:str = np.array(snake_case ) _lowerCAmelCase:Union[str, Any] = en_sentvecs.shape[0] # mean centering _lowerCAmelCase:Optional[int] = en_sentvecs - np.mean(snake_case , axis=0 ) _lowerCAmelCase:List[str] = in_sentvecs - np.mean(snake_case , axis=0 ) _lowerCAmelCase:int = cdist(snake_case , snake_case , '''cosine''' ) _lowerCAmelCase:List[Any] = np.array(range(snake_case ) ) _lowerCAmelCase:str = sim.argsort(axis=1 )[:, :10] _lowerCAmelCase:Tuple = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def __UpperCamelCase ( self : Optional[Any]) -> Optional[Any]: """simple docstring""" if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", 
"iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''') return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''int64''') if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''')), '''references''': datasets.Value('''int64''') if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''')), }) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None ,) def __UpperCamelCase ( self : Dict ,a__ : Any ,a__ : Dict) -> Tuple: """simple docstring""" if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(a__ ,a__)} elif self.config_name in ["wiki-ner"]: return acc_and_fa(a__ ,a__) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(a__ ,a__)} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''')
439
"""simple docstring""" def UpperCAmelCase ( snake_case : int , snake_case : int ): return x if y == 0 else greatest_common_divisor(snake_case , x % y ) def UpperCAmelCase ( snake_case : int , snake_case : int ): return (x * y) // greatest_common_divisor(snake_case , snake_case ) def UpperCAmelCase ( snake_case : int = 20 ): _lowerCAmelCase:List[Any] = 1 for i in range(1 , n + 1 ): _lowerCAmelCase:List[str] = lcm(snake_case , snake_case ) return g if __name__ == "__main__": print(F"{solution() = }")
439
1
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
305
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # the lock path must be hashed down to a legal filename length
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
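A minimal sketch of the locking pattern these tests exercise; the lock path here is an illustrative assumption:

```python
from datasets.utils.filelock import FileLock

with FileLock("/tmp/example.lock", timeout=1):
    # Only one process at a time may hold the lock; a second acquirer
    # raises Timeout after one second instead of blocking forever.
    pass
```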
305
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase__ : int = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
317
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowercase__ : Union[str, Any] = list[list[float | int]] def UpperCamelCase_ ( lowerCAmelCase__ : Matrix , lowerCAmelCase__ : Matrix ) -> Matrix: """simple docstring""" lowerCAmelCase_ : int = len(lowerCAmelCase__ ) lowerCAmelCase_ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowerCAmelCase__ )] lowerCAmelCase_ : int lowerCAmelCase_ : int lowerCAmelCase_ : int lowerCAmelCase_ : int lowerCAmelCase_ : int lowerCAmelCase_ : float for row in range(lowerCAmelCase__ ): for col in range(lowerCAmelCase__ ): lowerCAmelCase_ : Dict = matrix[row][col] lowerCAmelCase_ : List[str] = vector[row][0] lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Optional[Any] = 0 while row < size and col < size: # pivoting lowerCAmelCase_ : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowerCAmelCase__ , lowerCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , lowerCAmelCase__ ): lowerCAmelCase_ : Union[str, Any] = augmented[rowa][col] / augmented[row][col] lowerCAmelCase_ : List[str] = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , lowerCAmelCase__ ): for row in range(lowerCAmelCase__ ): lowerCAmelCase_ : Optional[Any] = augmented[row][col] / augmented[col][col] for cola in range(lowerCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowerCAmelCase__ ) ] def UpperCamelCase_ ( lowerCAmelCase__ : list[int] ) -> Callable[[int], int]: """simple docstring""" lowerCAmelCase_ : int = len(lowerCAmelCase__ ) lowerCAmelCase_ : Matrix = [[0 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )] lowerCAmelCase_ : Matrix = [[0] for _ in range(lowerCAmelCase__ )] lowerCAmelCase_ : Matrix lowerCAmelCase_ : int lowerCAmelCase_ : int lowerCAmelCase_ : int for x_val, y_val in enumerate(lowerCAmelCase__ ): for col in range(lowerCAmelCase__ ): lowerCAmelCase_ : List[str] = (x_val + 1) ** (size - col - 1) lowerCAmelCase_ : List[Any] = y_val lowerCAmelCase_ : List[str] = solve(lowerCAmelCase__ , lowerCAmelCase__ ) def interpolated_func(lowerCAmelCase__ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(lowerCAmelCase__ ) ) return interpolated_func def UpperCamelCase_ ( lowerCAmelCase__ : int ) -> int: """simple docstring""" return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def UpperCamelCase_ ( lowerCAmelCase__ : Callable[[int], int] = question_function , lowerCAmelCase__ : int = 10 ) -> int: """simple docstring""" lowerCAmelCase_ : list[int] = [func(lowerCAmelCase__ ) for x_val in range(1 , order + 1 )] lowerCAmelCase_ : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] lowerCAmelCase_ : int = 0 lowerCAmelCase_ : Callable[[int], int] lowerCAmelCase_ : int for poly in polynomials: lowerCAmelCase_ : Union[str, Any] = 1 while func(lowerCAmelCase__ ) == poly(lowerCAmelCase__ ): x_val += 1 ret += poly(lowerCAmelCase__ ) return ret if __name__ == "__main__": 
print(f'{solution() = }')
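A small worked example of the interpolation step, matching the Project Euler 101 statement: fitting the first three cubes gives the quadratic 6n**2 - 11n + 6, so the first incorrect term is OP(3, 4) = 58 rather than 4**3 = 64:

op3 = interpolate([n**3 for n in range(1, 4)])  # fit the first three cubes
assert [op3(n) for n in range(1, 4)] == [1, 8, 27]
assert op3(4) == 58  # the first incorrect term, OP(3, 4)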
317
1
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
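A hedged sketch of why these schedulers share a common registry: any of them can be swapped into a loaded pipeline via its config (the checkpoint id below is a placeholder):

from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("some/checkpoint")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)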
392
from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowerCamelCase : List[Any] =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]: return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ) -> str: UpperCamelCase__ : Any = tesseract_config if tesseract_config is not None else "" # apply OCR UpperCamelCase__ : int = to_pil_image(__lowerCAmelCase ) UpperCamelCase__ , UpperCamelCase__ : Dict = pil_image.size UpperCamelCase__ : Optional[Any] = pytesseract.image_to_data(__lowerCAmelCase , lang=__lowerCAmelCase , output_type="dict" , config=__lowerCAmelCase ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates UpperCamelCase__ : Tuple = [idx for idx, word in enumerate(__lowerCAmelCase ) if not word.strip()] UpperCamelCase__ : Tuple = [word for idx, word in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices] UpperCamelCase__ : Union[str, Any] = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices] UpperCamelCase__ : List[str] = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices] UpperCamelCase__ : Union[str, Any] = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices] UpperCamelCase__ : str = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCamelCase__ : List[Any] = [] for x, y, w, h in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): UpperCamelCase__ : Optional[int] = [x, y, x + w, y + h] actual_boxes.append(__lowerCAmelCase ) # finally, normalize the bounding boxes UpperCamelCase__ : int = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), "Not as many words as there are bounding boxes" return words, normalized_boxes class __a ( A__ ): _lowerCAmelCase : int = ['''pixel_values'''] def __init__( self : Dict , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[str] = "" , **SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) UpperCamelCase__ : str = size if size is not None else {"height": 2_24, "width": 2_24} UpperCamelCase__ : str = get_size_dict(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = do_resize 
UpperCamelCase__ : Union[str, Any] = size UpperCamelCase__ : List[str] = resample UpperCamelCase__ : Dict = apply_ocr UpperCamelCase__ : str = ocr_lang UpperCamelCase__ : List[str] = tesseract_config def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Dict , ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' ) UpperCamelCase__ : Union[str, Any] = (size["height"], size["width"]) return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' UpperCamelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize UpperCamelCase__ : Tuple = size if size is not None else self.size UpperCamelCase__ : Any = get_size_dict(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Tuple = resample if resample is not None else self.resample UpperCamelCase__ : Any = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCamelCase__ : Any = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCamelCase__ : str = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCamelCase__ : Dict = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) # All transformations expect numpy arrays. 
UpperCamelCase__ : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if apply_ocr: requires_backends(self , "pytesseract" ) UpperCamelCase__ : Dict = [] UpperCamelCase__ : List[Any] = [] for image in images: UpperCamelCase__ , UpperCamelCase__ : Any = apply_tesseract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) words_batch.append(SCREAMING_SNAKE_CASE ) boxes_batch.append(SCREAMING_SNAKE_CASE ) if do_resize: UpperCamelCase__ : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) UpperCamelCase__ : Any = [flip_channel_order(SCREAMING_SNAKE_CASE ) for image in images] UpperCamelCase__ : str = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] UpperCamelCase__ : Optional[Any] = BatchFeature(data={"pixel_values": images} , tensor_type=SCREAMING_SNAKE_CASE ) if apply_ocr: UpperCamelCase__ : Tuple = words_batch UpperCamelCase__ : Dict = boxes_batch return data
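A heavily hedged usage sketch for the image processor above. This row keeps its mangled identifiers (its duplicated placeholder parameter names would not parse as-is); the sketch assumes the upstream transformers equivalent, LayoutLMv2ImageProcessor, and a placeholder file name, and the OCR path needs the pytesseract backend:

from PIL import Image
from transformers import LayoutLMv2ImageProcessor  # assumed upstream equivalent

processor = LayoutLMv2ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")  # placeholder file name
encoding = processor(image, return_tensors="np")
words, boxes = encoding["words"][0], encoding["boxes"][0]  # present when OCR ran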
228
0
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
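A minimal usage sketch of the short-namer above, with invented prefix, defaults, and values: subclass it, shorten a parameter dict, and round-trip the result.

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}

name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
# batch_size equals its default, so only the learning rate survives, e.g. "run_lr0.0001"
params = RunNamer.parse_repr(name)
assert params["batch_size"] == 8  # missing keys are filled back in from DEFAULTS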
15
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
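A quick check of the derived attributes above, using only the defaults: with four stages the channel dimension doubles three times.

config = SwinConfig()
assert config.num_layers == 4
assert config.hidden_size == 96 * 2**3  # 768, the channel dim after the last stage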
15
1
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
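The shim above exists purely for backwards compatibility; a hedged sketch of observing the deprecation it emits:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    VideoMAEFeatureExtractor()  # the old name still constructs a working processor
assert any(issubclass(w.category, FutureWarning) for w in caught)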
2
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def lowerCamelCase__ ( ) -> List[str]: """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join _UpperCamelCase = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching, '''os.path.join''', __snake_case ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os, _PatchedModuleObj ) assert isinstance(_test_patching.os.path, _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path, _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os, _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path, _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def lowerCamelCase__ ( ) -> List[str]: """simple docstring""" assert _test_patching.open is open _UpperCamelCase = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching, '''open''', __snake_case ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def lowerCamelCase__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching, '''pandas.read_csv''', __snake_case ): pass def lowerCamelCase__ ( ) -> Dict: """simple docstring""" _UpperCamelCase = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching, '''len''', __snake_case ) is None with patch_submodule(_test_patching, '''len''', __snake_case ): assert _test_patching.len is mock assert _test_patching.len is len def lowerCamelCase__ ( ) -> Tuple: """simple 
docstring""" _UpperCamelCase = '''__test_patch_submodule_start_and_stop_mock__''' _UpperCamelCase = patch_submodule(_test_patching, '''open''', __snake_case ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def lowerCamelCase__ ( ) -> Optional[int]: """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join _UpperCamelCase = '''__test_patch_submodule_successive_join__''' _UpperCamelCase = '''__test_patch_submodule_successive_dirname__''' _UpperCamelCase = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching, '''os.path.join''', __snake_case ): with patch_submodule(_test_patching, '''os.rename''', __snake_case ): with patch_submodule(_test_patching, '''os.path.dirname''', __snake_case ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching, '''os.rename''', __snake_case ): with patch_submodule(_test_patching, '''os.path.join''', __snake_case ): with patch_submodule(_test_patching, '''os.path.dirname''', __snake_case ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def lowerCamelCase__ ( ) -> str: """simple docstring""" _UpperCamelCase = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', __snake_case ): pass with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', __snake_case ): pass
19
0
"""simple docstring""" SCREAMING_SNAKE_CASE__ : dict[str, float] ={ "km/h": 1.0, "m/s": 3.6, "mph": 1.60_9344, "knot": 1.852, } SCREAMING_SNAKE_CASE__ : dict[str, float] ={ "km/h": 1.0, "m/s": 0.2_7777_7778, "mph": 0.6_2137_1192, "knot": 0.5_3995_6803, } def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float: if unit_to not in speed_chart or unit_from not in speed_chart_inverse: _lowerCamelCase : Any = ( F'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n''' F'''Valid values are: {", ".join(SCREAMING_SNAKE_CASE_ )}''' ) raise ValueError(SCREAMING_SNAKE_CASE_ ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
558
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> Tuple: _lowerCamelCase : List[Any] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() _lowerCamelCase : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) _lowerCamelCase : Tuple = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } _lowerCamelCase : Tuple = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16000, '''return_attention_mask''': False, '''do_normalize''': True, } _lowerCamelCase : List[str] = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _lowerCamelCase : int = os.path.join(self.tmpdirname , _lowercase ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) # load decoder from hub _lowerCamelCase : Optional[Any] = '''hf-internal-testing/ngram-beam-search-decoder''' def a__ ( self , **_lowercase ) -> Optional[Any]: _lowerCamelCase : str = self.add_kwargs_tokens_map.copy() kwargs.update(_lowercase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def a__ ( self , **_lowercase ) -> Dict: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowercase ) def a__ ( self , **_lowercase ) -> int: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowercase ) def a__ ( self ) -> List[str]: shutil.rmtree(self.tmpdirname ) def a__ ( self ) -> Optional[int]: _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : Dict = self.get_feature_extractor() _lowerCamelCase : Any = self.get_decoder() _lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) processor.save_pretrained(self.tmpdirname ) _lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowercase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _lowercase ) # decoder 
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _lowercase ) def a__ ( self ) -> Dict: _lowerCamelCase : List[Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match _lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def a__ ( self ) -> str: _lowerCamelCase : Dict = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_lowercase , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_lowercase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def a__ ( self ) -> List[str]: _lowerCamelCase : List[str] = self.get_feature_extractor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_decoder() _lowerCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) _lowerCamelCase : str = floats_list((3, 1000) ) _lowerCamelCase : Union[str, Any] = feature_extractor(_lowercase , return_tensors='''np''' ) _lowerCamelCase : Union[str, Any] = processor(_lowercase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def a__ ( self ) -> List[Any]: _lowerCamelCase : Optional[Any] = self.get_feature_extractor() _lowerCamelCase : Tuple = self.get_tokenizer() _lowerCamelCase : Dict = self.get_decoder() _lowerCamelCase : int = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) _lowerCamelCase : List[Any] = '''This is a test string''' _lowerCamelCase : Tuple = processor(text=_lowercase ) _lowerCamelCase : List[Any] = tokenizer(_lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a__ ( self , _lowercase=(2, 10, 16) , _lowercase=77 ) -> Tuple: np.random.seed(_lowercase ) return np.random.rand(*_lowercase ) def a__ ( self ) -> Optional[int]: _lowerCamelCase : Optional[Any] = self.get_feature_extractor() _lowerCamelCase : Optional[Any] = self.get_tokenizer() _lowerCamelCase : List[str] = self.get_decoder() _lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) _lowerCamelCase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 ) _lowerCamelCase : Optional[Any] = processor.decode(_lowercase ) _lowerCamelCase : Union[str, Any] = decoder.decode_beams(_lowercase )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], 
['''spawn''']] ) def a__ ( self , _lowercase ) -> Any: _lowerCamelCase : List[Any] = self.get_feature_extractor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : Any = self.get_decoder() _lowerCamelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) _lowerCamelCase : Any = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: _lowerCamelCase : List[str] = processor.batch_decode(_lowercase ) else: with get_context(_lowercase ).Pool() as pool: _lowerCamelCase : Optional[int] = processor.batch_decode(_lowercase , _lowercase ) _lowerCamelCase : Optional[int] = list(_lowercase ) with get_context('''fork''' ).Pool() as p: _lowerCamelCase : Optional[Any] = decoder.decode_beams_batch(_lowercase , _lowercase ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_lowercase , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_lowercase , decoded_processor.logit_score ) self.assertListEqual(_lowercase , decoded_processor.lm_score ) def a__ ( self ) -> Any: _lowerCamelCase : Any = self.get_feature_extractor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : int = self.get_decoder() _lowerCamelCase : Any = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) _lowerCamelCase : List[str] = self._get_dummy_logits() _lowerCamelCase : int = 15 _lowerCamelCase : Union[str, Any] = -20.0 _lowerCamelCase : Optional[Any] = -4.0 _lowerCamelCase : str = processor.batch_decode( _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , ) _lowerCamelCase : Optional[Any] = decoded_processor_out.text _lowerCamelCase : str = list(_lowercase ) with get_context('''fork''' ).Pool() as pool: _lowerCamelCase : List[Any] = decoder.decode_beams_batch( _lowercase , _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , ) _lowerCamelCase : Tuple = [d[0][0] for d in decoded_decoder_out] _lowerCamelCase : Any = [d[0][2] for d in decoded_decoder_out] _lowerCamelCase : Optional[Any] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _lowercase ) self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _lowercase , atol=1E-3 ) ) self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , _lowercase , atol=1E-3 ) ) def a__ ( self ) -> str: _lowerCamelCase : Any = self.get_feature_extractor() _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : List[str] = self.get_decoder() _lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) _lowerCamelCase : Optional[int] = self._get_dummy_logits() _lowerCamelCase : Optional[int] = 2.0 _lowerCamelCase : Optional[Any] = 5.0 _lowerCamelCase : int = -20.0 
_lowerCamelCase : Any = True _lowerCamelCase : List[Any] = processor.batch_decode( _lowercase , alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , ) _lowerCamelCase : Optional[Any] = decoded_processor_out.text _lowerCamelCase : Optional[int] = list(_lowercase ) decoder.reset_params( alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , ) with get_context('''fork''' ).Pool() as pool: _lowerCamelCase : Tuple = decoder.decode_beams_batch( _lowercase , _lowercase , ) _lowerCamelCase : Optional[int] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_lowercase , _lowercase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _lowercase ) _lowerCamelCase : int = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _lowercase ) def a__ ( self ) -> Dict: _lowerCamelCase : List[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _lowerCamelCase : Dict = processor.decoder.model_container[processor.decoder._model_key] _lowerCamelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() _lowerCamelCase : Optional[Any] = os.listdir(_lowercase ) _lowerCamelCase : str = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_lowercase , _lowercase ) def a__ ( self ) -> int: _lowerCamelCase : Union[str, Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' ) _lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowercase ) _lowerCamelCase : str = processor.decoder.model_container[processor.decoder._model_key] _lowerCamelCase : Dict = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() _lowerCamelCase : Optional[Any] = os.listdir(_lowercase ) _lowerCamelCase : Optional[Any] = os.listdir(_lowercase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_lowercase , _lowercase ) def a__ ( self ) -> Optional[int]: _lowerCamelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _lowerCamelCase : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _lowerCamelCase : int = floats_list((3, 1000) ) _lowerCamelCase : Any = processor_wavaveca(_lowercase , return_tensors='''np''' ) _lowerCamelCase : List[str] = processor_auto(_lowercase , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) _lowerCamelCase : List[Any] = self._get_dummy_logits() _lowerCamelCase : Any = processor_wavaveca.batch_decode(_lowercase ) _lowerCamelCase : str = processor_auto.batch_decode(_lowercase ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def a__ ( self ) -> Dict: _lowerCamelCase : List[str] = self.get_feature_extractor() _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : Tuple = self.get_decoder() 
_lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def a__ ( _lowercase , _lowercase ) -> Tuple: _lowerCamelCase : Dict = [d[key] for d in offsets] return retrieved_list def a__ ( self ) -> Tuple: _lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _lowerCamelCase : int = self._get_dummy_logits()[0] _lowerCamelCase : str = processor.decode(_lowercase , output_word_offsets=_lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_lowercase , _lowercase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def a__ ( self ) -> Tuple: _lowerCamelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _lowerCamelCase : str = self._get_dummy_logits() _lowerCamelCase : Any = processor.batch_decode(_lowercase , output_word_offsets=_lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_lowercase , _lowercase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def a__ ( self ) -> List[str]: import torch _lowerCamelCase : Union[str, Any] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_lowercase ) _lowerCamelCase : Union[str, Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) ) _lowerCamelCase : List[str] = iter(_lowercase ) _lowerCamelCase : List[str] = next(_lowercase ) _lowerCamelCase : int = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) _lowerCamelCase : Tuple = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train _lowerCamelCase : List[str] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): _lowerCamelCase : List[str] = model(_lowercase ).logits.cpu().numpy() _lowerCamelCase : Optional[Any] = processor.decode(logits[0] , output_word_offsets=_lowercase ) _lowerCamelCase : Tuple = model.config.inputs_to_logits_ratio / 
processor.feature_extractor.sampling_rate _lowerCamelCase : Union[str, Any] = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] _lowerCamelCase : Dict = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , _lowercase ) self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , output.text ) # output times _lowerCamelCase : str = torch.tensor(self.get_from_offsets(_lowercase , '''start_time''' ) ) _lowerCamelCase : Optional[int] = torch.tensor(self.get_from_offsets(_lowercase , '''end_time''' ) ) # fmt: off _lowerCamelCase : Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) _lowerCamelCase : Optional[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) ) self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) )
558
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: """simple docstring""" for attribute in key.split(""".""" ): lowercase_ : str = getattr(lowercase , lowercase ) if weight_type is not None: lowercase_ : Dict = getattr(lowercase , lowercase ).shape else: lowercase_ : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase_ : Dict = value elif weight_type == "weight_g": lowercase_ : Union[str, Any] = value elif weight_type == "weight_v": lowercase_ : Dict = value elif weight_type == "bias": lowercase_ : Optional[Any] = value else: lowercase_ : int = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __magic_name__ ( lowercase , lowercase , lowercase ) -> int: """simple docstring""" lowercase_ : Any = [] lowercase_ : Optional[Any] = fairseq_model.state_dict() lowercase_ : List[str] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase_ : List[Any] = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) lowercase_ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): lowercase_ : Union[str, Any] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowercase_ : Any = True if "*" in mapped_key: lowercase_ : Tuple = name.split(lowercase )[0].split(""".""" )[-2] lowercase_ : int = mapped_key.replace("""*""" , lowercase ) if "weight_g" in name: lowercase_ : Optional[Any] = """weight_g""" elif "weight_v" in name: lowercase_ : Optional[int] = """weight_v""" elif "weight" in name: lowercase_ : Any = """weight""" elif "bias" in name: lowercase_ : Tuple = """bias""" else: lowercase_ : List[str] = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: """simple docstring""" lowercase_ : Any = full_name.split("""conv_layers.""" )[-1] lowercase_ : Union[str, Any] = name.split(""".""" ) lowercase_ : List[str] = int(items[0] ) lowercase_ : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase_ : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase_ : Optional[Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowercase_ : Optional[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase_ : Dict = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase ) def __magic_name__ ( lowercase , lowercase ) -> Optional[int]: """simple docstring""" lowercase_ : str = SEWConfig() if is_finetuned: lowercase_ : Optional[int] = model.wav_encoder.wav_model.cfg else: lowercase_ : Any = model.cfg lowercase_ : str = fs_config.conv_bias lowercase_ : List[Any] = eval(fs_config.conv_feature_layers ) lowercase_ : int = [x[0] for x in conv_layers] lowercase_ : Optional[Any] = [x[1] for x in conv_layers] lowercase_ : Dict = [x[2] for x in conv_layers] lowercase_ : Optional[int] = """gelu""" lowercase_ : int = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" lowercase_ : Optional[int] = 0.0 lowercase_ : Optional[Any] = fs_config.activation_fn.name lowercase_ : Union[str, Any] = fs_config.encoder_embed_dim lowercase_ : Optional[Any] = 0.02 lowercase_ : List[Any] = fs_config.encoder_ffn_embed_dim lowercase_ : Optional[Any] = 1E-5 lowercase_ : str = fs_config.encoder_layerdrop lowercase_ : List[str] = fs_config.encoder_attention_heads lowercase_ : str = fs_config.conv_pos_groups lowercase_ : List[str] = fs_config.conv_pos lowercase_ : Tuple = len(lowercase ) lowercase_ : List[str] = fs_config.encoder_layers lowercase_ : List[Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: lowercase_ : Union[str, Any] = model.cfg lowercase_ : List[Any] = fs_config.final_dropout lowercase_ : Optional[int] = fs_config.layerdrop lowercase_ : Union[str, Any] = fs_config.activation_dropout lowercase_ : Optional[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 lowercase_ : List[str] = fs_config.attention_dropout lowercase_ : List[str] = fs_config.dropout_input lowercase_ : Tuple = fs_config.dropout lowercase_ : Optional[Any] = fs_config.mask_channel_length lowercase_ : Optional[int] = fs_config.mask_channel_prob lowercase_ : Union[str, Any] = fs_config.mask_length lowercase_ : Union[str, Any] = fs_config.mask_prob lowercase_ : Tuple = """Wav2Vec2FeatureExtractor""" lowercase_ : str = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def __magic_name__ ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> str: """simple docstring""" if is_finetuned: lowercase_ , lowercase_ , lowercase_ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: lowercase_ , lowercase_ , lowercase_ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: lowercase_ : int = SEWConfig.from_pretrained(lowercase ) else: lowercase_ : Optional[int] = convert_config(model[0] , lowercase ) lowercase_ : Tuple = model[0].eval() lowercase_ : Optional[Any] = True if config.feat_extract_norm == """layer""" else False lowercase_ : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowercase , 
return_attention_mask=lowercase , ) if is_finetuned: if dict_path: lowercase_ : Optional[Any] = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase_ : Union[str, Any] = target_dict.pad_index lowercase_ : List[Any] = target_dict.bos_index lowercase_ : int = target_dict.pad_index lowercase_ : str = target_dict.bos_index lowercase_ : Dict = target_dict.eos_index lowercase_ : Optional[int] = len(target_dict.symbols ) lowercase_ : Any = os.path.join(lowercase , """vocab.json""" ) if not os.path.isdir(lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , lowercase ) lowercase_ : Optional[int] = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , ) lowercase_ : Optional[Any] = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) lowercase_ : Any = SEWForCTC(lowercase ) else: lowercase_ : Any = SEWModel(lowercase ) feature_extractor.save_pretrained(lowercase ) recursively_load_weights(lowercase , lowercase , lowercase ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
458
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Cycle through the key, appending characters until it matches the message length."""
    x = len(key)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter backwards by the matching key letter (mod 26)."""
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt: shift each letter forwards by the matching key letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
458
1
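For quick orientation, a usage sketch of the running-key cipher sample above (names as restored in that sample; the expanded key below follows from the key-cycling logic, not from verified output):

# Hedged usage sketch of the cipher sample above.
key_new = generate_key("THE GERMAN ATTACK", "SECRET")  # -> "SECRETSECRETSECRE"
encrypted = cipher_text("THE GERMAN ATTACK", key_new)
assert original_text(encrypted, key_new) == "THE GERMAN ATTACK"  # round-trip holds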
'''simple docstring''' class __snake_case : """simple docstring""" def __init__( self : Optional[Any] ) -> str: lowerCAmelCase_ : str = 0 lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Optional[Any] = {} def __lowercase ( self : Union[str, Any] , lowerCamelCase : Any ) -> Optional[Any]: if vertex not in self.adjacency: lowerCAmelCase_ : List[Any] = {} self.num_vertices += 1 def __lowercase ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ) -> Dict: self.add_vertex(lowerCamelCase ) self.add_vertex(lowerCamelCase ) if head == tail: return lowerCAmelCase_ : List[Any] = weight lowerCAmelCase_ : Tuple = weight def __lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowerCAmelCase_ : Any = self.get_edges() for edge in edges: lowerCAmelCase_ : Optional[int] = edge edges.remove((tail, head, weight) ) for i in range(len(lowerCamelCase ) ): lowerCAmelCase_ : List[str] = list(edges[i] ) edges.sort(key=lambda lowerCamelCase : e[2] ) for i in range(len(lowerCamelCase ) - 1 ): if edges[i][2] >= edges[i + 1][2]: lowerCAmelCase_ : Any = edges[i][2] + 1 for edge in edges: lowerCAmelCase_ : List[str] = edge lowerCAmelCase_ : Optional[int] = weight lowerCAmelCase_ : Optional[int] = weight def __str__( self : Optional[Any] ) -> Any: lowerCAmelCase_ : List[str] = """""" for tail in self.adjacency: for head in self.adjacency[tail]: lowerCAmelCase_ : List[Any] = self.adjacency[head][tail] string += F'{head} -> {tail} == {weight}\n' return string.rstrip("""\n""" ) def __lowercase ( self : Tuple ) -> Dict: lowerCAmelCase_ : Optional[Any] = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __lowercase ( self : int ) -> int: return self.adjacency.keys() @staticmethod def __lowercase ( lowerCamelCase : Tuple=None , lowerCamelCase : Union[str, Any]=None ) -> Any: lowerCAmelCase_ : str = Graph() if vertices is None: lowerCAmelCase_ : Dict = [] if edges is None: lowerCAmelCase_ : List[str] = [] for vertex in vertices: g.add_vertex(lowerCamelCase ) for edge in edges: g.add_edge(*lowerCamelCase ) return g class __snake_case : """simple docstring""" def __init__( self : str ) -> List[Any]: lowerCAmelCase_ : Union[str, Any] = {} lowerCAmelCase_ : Optional[Any] = {} def __len__( self : Optional[Any] ) -> int: return len(self.parent ) def __lowercase ( self : Optional[Any] , lowerCamelCase : Optional[Any] ) -> List[str]: if item in self.parent: return self.find(lowerCamelCase ) lowerCAmelCase_ : List[Any] = item lowerCAmelCase_ : Dict = 0 return item def __lowercase ( self : Optional[Any] , lowerCamelCase : int ) -> Optional[Any]: if item not in self.parent: return self.make_set(lowerCamelCase ) if item != self.parent[item]: lowerCAmelCase_ : List[str] = self.find(self.parent[item] ) return self.parent[item] def __lowercase ( self : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict ) -> List[str]: lowerCAmelCase_ : List[str] = self.find(lowerCamelCase ) lowerCAmelCase_ : List[str] = self.find(lowerCamelCase ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: lowerCAmelCase_ : Optional[Any] = roota return roota if self.rank[roota] < self.rank[roota]: lowerCAmelCase_ : int = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 lowerCAmelCase_ : Any = roota return roota return None @staticmethod def __lowercase ( lowerCamelCase : int ) -> List[str]: lowerCAmelCase_ : Optional[int] = 
graph.num_vertices lowerCAmelCase_ : Tuple = Graph.UnionFind() lowerCAmelCase_ : int = [] while num_components > 1: lowerCAmelCase_ : str = {} for vertex in graph.get_vertices(): lowerCAmelCase_ : int = -1 lowerCAmelCase_ : int = graph.get_edges() for edge in edges: lowerCAmelCase_ : List[Any] = edge edges.remove((tail, head, weight) ) for edge in edges: lowerCAmelCase_ : List[Any] = edge lowerCAmelCase_ : List[str] = union_find.find(lowerCamelCase ) lowerCAmelCase_ : List[str] = union_find.find(lowerCamelCase ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCAmelCase_ : List[Any] = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: lowerCAmelCase_ : str = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: lowerCAmelCase_ : Dict = cheap_edge[vertex] if union_find.find(lowerCamelCase ) != union_find.find(lowerCamelCase ): union_find.union(lowerCamelCase , lowerCamelCase ) mst_edges.append(cheap_edge[vertex] ) lowerCAmelCase_ : Tuple = num_components - 1 lowerCAmelCase_ : Tuple = Graph.build(edges=lowerCamelCase ) return mst
702
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = "▁" __A : Tuple = {"vocab_file": "sentencepiece.bpe.model"} __A : Tuple = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } __A : int = { "facebook/xglm-564M": 2048, } class __snake_case ( _SCREAMING_SNAKE_CASE): """simple docstring""" lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] def __init__( self : Any , lowerCamelCase : Any , lowerCamelCase : str="<s>" , lowerCamelCase : Optional[int]="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : List[Any]="<s>" , lowerCamelCase : Optional[Any]="<unk>" , lowerCamelCase : int="<pad>" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : Optional[Any] , ) -> None: lowerCAmelCase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowerCAmelCase_ : str = 7 lowerCAmelCase_ : Any = [F'<madeupword{i}>' for i in range(self.num_madeup_words )] lowerCAmelCase_ : Optional[Any] = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , ) lowerCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase ) ) lowerCAmelCase_ : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCAmelCase_ : List[str] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowerCAmelCase_ : Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} lowerCAmelCase_ : Union[str, Any] = len(self.sp_model ) lowerCAmelCase_ : Any = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(lowerCamelCase ) lowerCAmelCase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int ) -> Union[str, Any]: lowerCAmelCase_ : Union[str, Any] = self.__dict__.copy() lowerCAmelCase_ : str = None lowerCAmelCase_ : List[str] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict , lowerCamelCase : List[Any] ) -> List[Any]: lowerCAmelCase_ : Union[str, Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase_ : int = {} lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowercase ( self : List[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowerCAmelCase_ : List[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __lowercase ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) def __lowercase ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: lowerCAmelCase_ : Dict = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __lowercase ( self : str ) -> Union[str, Any]: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __lowercase ( self : Optional[Any] ) -> Dict: lowerCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowercase ( self : int , lowerCamelCase : str ) -> List[str]: return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase ) def __lowercase ( self : int , lowerCamelCase : Dict ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase_ : int = self.sp_model.PieceToId(lowerCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowercase ( self : Dict , lowerCamelCase : Optional[int] ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowercase ( self : List[str] , lowerCamelCase : Optional[Any] ) -> Optional[Any]: lowerCAmelCase_ : str = """""".join(lowerCamelCase ).replace(lowerCamelCase , """ """ ).strip() return out_string def __lowercase ( self : Optional[Any] , lowerCamelCase : 
str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase_ : List[str] = os.path.join( lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase , """wb""" ) as fi: lowerCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase ) return (out_vocab_file,)
398
0
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series as strings: ["1", "1/2", ..., "1/n"]."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
336
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
313
0
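As context for the processor sample above, a minimal usage sketch; the checkpoint name "dandelin/vilt-b32-mlm" and the image path are assumptions, not taken from this file:

# Hypothetical checkpoint and image path, for illustration only.
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
image = Image.open("photo.jpg")
encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")
# encoding combines input_ids/attention_mask from the tokenizer with
# pixel_values/pixel_mask from the image processor.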
"""simple docstring""" import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowerCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): UpperCAmelCase_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def snake_case_ (self , __a=0 ) -> str: UpperCamelCase = np.random.RandomState(_a ) UpperCamelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = pipe(**_a ).images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) UpperCamelCase = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ (self ) -> str: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = pipe(**_a ).images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) UpperCamelCase = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ (self ) -> Tuple: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = pipe(**_a ).images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) UpperCamelCase = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ (self ) -> List[Any]: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = pipe(**_a ).images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) UpperCamelCase = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ (self ) -> List[Any]: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) 
pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = pipe(**_a ).images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) UpperCamelCase = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ (self ) -> int: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = pipe(**_a ).images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) UpperCamelCase = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case_ (self ) -> int: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = 3 * [inputs["""prompt"""]] # forward UpperCamelCase = pipe(**_a ) UpperCamelCase = output.images[0, -3:, -3:, -1] UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = 3 * [inputs.pop("prompt" )] UpperCamelCase = pipe.tokenizer( _a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_a , return_tensors="np" , ) UpperCamelCase = text_inputs["""input_ids"""] UpperCamelCase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] UpperCamelCase = prompt_embeds # forward UpperCamelCase = pipe(**_a ) UpperCamelCase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def snake_case_ (self ) -> Any: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = 3 * ["""this is a negative prompt"""] UpperCamelCase = negative_prompt UpperCamelCase = 3 * [inputs["""prompt"""]] # forward UpperCamelCase = pipe(**_a ) UpperCamelCase = output.images[0, -3:, -3:, -1] UpperCamelCase = self.get_dummy_inputs() UpperCamelCase = 3 * [inputs.pop("prompt" )] UpperCamelCase = [] for p in [prompt, negative_prompt]: UpperCamelCase = pipe.tokenizer( _a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_a , return_tensors="np" , ) UpperCamelCase = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) UpperCamelCase = embeds # forward UpperCamelCase = pipe(**_a ) UpperCamelCase = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): @property def snake_case_ (self ) -> Optional[int]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case_ (self ) -> int: UpperCamelCase = ort.SessionOptions() UpperCamelCase = False return options def snake_case_ (self ) -> Optional[int]: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_a , feature_extractor=_a , 
provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = """A painting of a squirrel eating a burger""" np.random.seed(0 ) UpperCamelCase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" ) UpperCamelCase = output.images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def snake_case_ (self ) -> Dict: UpperCamelCase = DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = """open neural network exchange""" UpperCamelCase = np.random.RandomState(0 ) UpperCamelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="np" ) UpperCamelCase = output.images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def snake_case_ (self ) -> Dict: UpperCamelCase = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = """open neural network exchange""" UpperCamelCase = np.random.RandomState(0 ) UpperCamelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="np" ) UpperCamelCase = output.images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def snake_case_ (self ) -> Any: UpperCamelCase = 0 def test_callback_fn(__a , __a , __a ) -> None: UpperCamelCase = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) UpperCamelCase = latents[0, -3:, -3:, -1] UpperCamelCase = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) UpperCamelCase = latents[0, -3:, -3:, -1] UpperCamelCase = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 UpperCamelCase = False UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) UpperCamelCase = """Andromeda galaxy in a bottle""" UpperCamelCase = np.random.RandomState(0 ) pipe( prompt=_a , 
num_inference_steps=5 , guidance_scale=7.5 , generator=_a , callback=_a , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_a , _a ) assert pipe.safety_checker is None UpperCamelCase = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_a ) UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(_a ) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCamelCase = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None
705
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('''.''') def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " F"{test_file} instead." ) UpperCamelCase = components[-1] if not test_fn.endswith("py" ): raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." ) if not test_fn.startswith("test_modeling_" ): raise ValueError( F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." ) UpperCamelCase = components[:-1] + [test_fn.replace(".py" , "" )] UpperCamelCase = ".".join(_SCREAMING_SNAKE_CASE ) return test_module_path def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = get_module_path(_SCREAMING_SNAKE_CASE ) UpperCamelCase = importlib.import_module(_SCREAMING_SNAKE_CASE ) return test_module def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = [] UpperCamelCase = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = [] UpperCamelCase = get_test_module(_SCREAMING_SNAKE_CASE ) for attr in dir(_SCREAMING_SNAKE_CASE ): UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , "all_model_classes" , [] ) if len(_SCREAMING_SNAKE_CASE ) > 0: test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = get_test_classes(_SCREAMING_SNAKE_CASE ) UpperCamelCase = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = test_class() if hasattr(_SCREAMING_SNAKE_CASE , "setUp" ): test.setUp() UpperCamelCase = None if hasattr(_SCREAMING_SNAKE_CASE , "model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: UpperCamelCase = test.model_tester.__class__ return model_tester def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = get_test_classes(_SCREAMING_SNAKE_CASE ) UpperCamelCase = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = [] for test_class in test_classes: UpperCamelCase = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) if tester_class is not None: tester_classes.append(_SCREAMING_SNAKE_CASE ) # sort with class names return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ ) def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = get_test_classes(_SCREAMING_SNAKE_CASE ) UpperCamelCase = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes} return test_tester_mapping def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = get_model_classes(_SCREAMING_SNAKE_CASE ) UpperCamelCase = { model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_test_mapping def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = get_model_classes(_SCREAMING_SNAKE_CASE ) UpperCamelCase = { model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes } return model_to_tester_mapping def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return o.__name__ elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ): return [to_json(_SCREAMING_SNAKE_CASE ) for x in o] elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()} else: return o
544
0
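A short sketch of how the test-introspection helpers in the sample above are called, assuming the de-obfuscated names get_module_path and get_test_module that their bodies reference:

# The test-file path is an illustrative example, not taken from this file.
module_path = get_module_path("tests/models/bert/test_modeling_bert.py")
assert module_path == "tests.models.bert.test_modeling_bert"
test_module = get_test_module("tests/models/bert/test_modeling_bert.py")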
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : str , A__ : str , A__ : Union[str, Any]=13 , A__ : str=30 , A__ : Optional[int]=2 , A__ : Optional[Any]=3 , A__ : List[str]=True , A__ : Union[str, Any]=True , A__ : Any=32 , A__ : List[str]=2 , A__ : Any=4 , A__ : Optional[Any]=37 , A__ : int="gelu" , A__ : List[Any]=0.1 , A__ : Tuple=0.1 , A__ : Any=10 , A__ : List[Any]=0.02 , A__ : List[Any]=3 , A__ : List[str]=None , ): """simple docstring""" __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : List[str] = image_size __lowerCamelCase : str = patch_size __lowerCamelCase : List[str] = num_channels __lowerCamelCase : List[str] = is_training __lowerCamelCase : Optional[Any] = use_labels __lowerCamelCase : Optional[Any] = hidden_size __lowerCamelCase : Optional[int] = num_hidden_layers __lowerCamelCase : Optional[int] = num_attention_heads __lowerCamelCase : str = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Dict = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : List[Any] = type_sequence_label_size __lowerCamelCase : int = initializer_range __lowerCamelCase : List[Any] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : List[Any] = (image_size // patch_size) ** 2 __lowerCamelCase : Tuple = num_patches + 1 def a_ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Dict = None if self.use_labels: __lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def a_ ( self : Tuple ): """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , ) def a_ ( self : Optional[int] , A__ : Optional[int] , A__ : List[Any] , A__ : int ): """simple docstring""" __lowerCamelCase : int = TFViTModel(config=A__ ) __lowerCamelCase : Union[str, Any] = model(A__ , training=A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
__lowerCamelCase : Dict = self.image_size // 2 __lowerCamelCase : Optional[Any] = pixel_values[:, :, :image_size, :image_size] __lowerCamelCase : Union[str, Any] = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) __lowerCamelCase : int = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def a_ ( self : List[Any] , A__ : Dict , A__ : str , A__ : Union[str, Any] ): """simple docstring""" __lowerCamelCase : Dict = self.type_sequence_label_size __lowerCamelCase : List[Any] = TFViTForImageClassification(A__ ) __lowerCamelCase : Union[str, Any] = model(A__ , labels=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. __lowerCamelCase : str = self.image_size // 2 __lowerCamelCase : int = pixel_values[:, :, :image_size, :image_size] __lowerCamelCase : Optional[Any] = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : Union[str, Any] = TFViTForImageClassification(A__ ) __lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : Dict = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a_ ( self : str ): """simple docstring""" __lowerCamelCase : int = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = config_and_inputs __lowerCamelCase : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): snake_case__ : List[str] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () snake_case__ : Tuple = ( {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification} if is_tf_available() else {} ) snake_case__ : Any = False snake_case__ : Dict = False snake_case__ : Any = False def a_ ( self : str ): """simple docstring""" __lowerCamelCase : List[Any] = TFViTModelTester(self ) __lowerCamelCase : Optional[int] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def a_ ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def a_ ( self : Optional[int] ): """simple docstring""" pass @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def a_ ( self : int ): """simple docstring""" pass def a_ ( self : Any ): """simple docstring""" __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : str = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __lowerCamelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) ) def a_ ( self : Any ): """simple docstring""" __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[int] = model_class(A__ ) __lowerCamelCase : List[str] = inspect.signature(model.call ) # 
signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Optional[int] = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A__ ) def a_ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def a_ ( self : Any ): """simple docstring""" __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def a_ ( self : Dict ): """simple docstring""" __lowerCamelCase : str = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(A__ ) def __lowercase () -> int: """simple docstring""" __lowerCamelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def a_ ( self : Any ): """simple docstring""" return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def a_ ( self : Any ): """simple docstring""" __lowerCamelCase : List[Any] = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ) __lowerCamelCase : int = self.default_image_processor __lowerCamelCase : Optional[int] = prepare_img() __lowerCamelCase : Any = image_processor(images=A__ , return_tensors="""tf""" ) # forward pass __lowerCamelCase : Optional[int] = model(**A__ ) # verify the logits __lowerCamelCase : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , A__ ) __lowerCamelCase : List[Any] = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
150
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
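A standalone sketch of the behaviour this test exercises, under the assumption that vocab_file and merges_file point at the tiny fixtures written in setUp():

# Minimal sketch; vocab_file/merges_file are the fixture paths from setUp().
tokenizer = PhobertTokenizer(vocab_file, merges_file, unk_token="<unk>")
tokens = tokenizer.tokenize("Tôi là VinAI Research")
ids = tokenizer.convert_tokens_to_ids(tokens + [tokenizer.unk_token])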
150
1

def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Split row-wise source data into one list per column, as floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max scale each column; weight 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            raise ValueError(f"Invalid weight of {weight:f} provided")
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one final score per row."""
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row of source_data and append the final score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
183
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ): @slow @require_torch def a_ ( self ) -> List[Any]: UpperCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' ) UpperCAmelCase = bertabert.config.encoder.vocab_size UpperCAmelCase = tokenizer.sep_token_id UpperCAmelCase = tokenizer.cls_token_id UpperCAmelCase = 1_2_8 UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) UpperCAmelCase = train_dataset.select(range(3_2 ) ) UpperCAmelCase = val_dataset.select(range(1_6 ) ) UpperCAmelCase = 4 def _map_to_encoder_decoder_inputs(lowercase_ ): # Tokenizer will automatically set [BOS] <text> [EOS] UpperCAmelCase = tokenizer(batch['article'] , padding='max_length' , truncation=lowercase_ , max_length=5_1_2 ) UpperCAmelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=lowercase_ , max_length=1_2_8 ) UpperCAmelCase = inputs.input_ids UpperCAmelCase = inputs.attention_mask UpperCAmelCase = outputs.input_ids UpperCAmelCase = outputs.input_ids.copy() UpperCAmelCase = [ [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] UpperCAmelCase = outputs.attention_mask assert all(len(lowercase_ ) == 5_1_2 for x in inputs.input_ids ) assert all(len(lowercase_ ) == 1_2_8 for x in outputs.input_ids ) return batch def _compute_metrics(lowercase_ ): UpperCAmelCase = pred.label_ids UpperCAmelCase = pred.predictions # all unnecessary tokens are removed UpperCAmelCase = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) UpperCAmelCase = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) UpperCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowercase_ ) )] ) / len(lowercase_ ) return {"accuracy": accuracy} # map train dataset UpperCAmelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset UpperCAmelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = SeqaSeqTrainingArguments( output_dir=lowercase_ , per_device_train_batch_size=lowercase_ , per_device_eval_batch_size=lowercase_ , predict_with_generate=lowercase_ , evaluation_strategy='steps' , do_train=lowercase_ , do_eval=lowercase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer UpperCAmelCase = SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , compute_metrics=_compute_metrics , train_dataset=lowercase_ , eval_dataset=lowercase_ , tokenizer=lowercase_ , ) # start training trainer.train()
183
1
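A worked example for procentual_proximity above: weight 0 marks a column where lower is better, weight 1 a column where higher is better, and each column is min-max scaled before the per-row sum:

vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
weights = [0, 0, 1]  # minimise the first two columns, maximise the third
print(procentual_proximity(vehicles, weights))
# [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]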
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    # Placeholder that raises when transformers/torch/note_seq are missing.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
686
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path _lowerCamelCase : Union[str, Any] = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) _lowerCamelCase : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} _lowerCamelCase : Optional[int] = '''zero2''' _lowerCamelCase : List[Any] = '''zero3''' _lowerCamelCase : Dict = [ZEROa, ZEROa] def a_ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple ) -> Dict: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param _snake_case = parameterized.to_safe_name('_'.join(str(__lowercase ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test _lowerCamelCase : Dict = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' @parameterized.expand(lowercase , name_func=lowercase ) def A ( self : List[str] , lowercase : List[Any] , lowercase : Dict ): '''simple docstring''' self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A ( self : Any , lowercase : str , lowercase : List[str] ): '''simple docstring''' self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @parameterized.expand(lowercase , name_func=lowercase ) def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Optional[int] ): '''simple docstring''' self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] ): '''simple docstring''' self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) def A ( self : List[str] , lowercase : Optional[Any] ): '''simple docstring''' pass def A ( self : str , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : bool = True , lowercase : bool = True , lowercase : bool = True , ): '''simple docstring''' _snake_case = models[model] _snake_case = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , ) self.do_checks(lowercase ) return output_dir def A ( self : Any , lowercase : str , lowercase : str , lowercase : int = 10 , lowercase : int = 1 
, lowercase : bool = True , lowercase : bool = True , ): '''simple docstring''' _snake_case = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase ) _snake_case = f''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['--fp16'] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _snake_case = self.get_launcher(lowercase ) _snake_case = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase , env=self.get_env() ) return output_dir def A ( self : List[str] , lowercase : Any=False ): '''simple docstring''' _snake_case = min(2 , get_gpu_count() ) if distributed else 1 return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
686
1
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except local rank 0.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
588
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
588
1
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class a__ ( a__ ): '''simple docstring''' lowercase__ : List[str] = ["input_features", "is_longer"] def __init__( self , lowerCamelCase_=64 , lowerCamelCase_=4_80_00 , lowerCamelCase_=4_80 , lowerCamelCase_=10 , lowerCamelCase_=10_24 , lowerCamelCase_=0.0 , lowerCamelCase_=False , lowerCamelCase_ = 0 , lowerCamelCase_ = 1_40_00 , lowerCamelCase_ = None , lowerCamelCase_ = "fusion" , lowerCamelCase_ = "repeatpad" , **lowerCamelCase_ , ) -> Tuple: super().__init__( feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , ) lowerCAmelCase__ = top_db lowerCAmelCase__ = truncation lowerCAmelCase__ = padding lowerCAmelCase__ = fft_window_size lowerCAmelCase__ = (fft_window_size >> 1) + 1 lowerCAmelCase__ = hop_length lowerCAmelCase__ = max_length_s lowerCAmelCase__ = max_length_s * sampling_rate lowerCAmelCase__ = sampling_rate lowerCAmelCase__ = frequency_min lowerCAmelCase__ = frequency_max lowerCAmelCase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm=lowerCamelCase_ , mel_scale='''htk''' , ) lowerCAmelCase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict[str, Any]: lowerCAmelCase__ = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> np.ndarray: lowerCAmelCase__ = spectrogram( lowerCamelCase_ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase_ , log_mel='''dB''' , ) return log_mel_spectrogram.T def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str: lowerCAmelCase__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase__ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase__ = [0] # randomly choose index for each part lowerCAmelCase__ = np.random.choice(ranges[0] ) lowerCAmelCase__ = np.random.choice(ranges[1] ) lowerCAmelCase__ = np.random.choice(ranges[2] ) lowerCAmelCase__ = mel[idx_front : idx_front + chunk_frames, :] lowerCAmelCase__ = mel[idx_middle : idx_middle + chunk_frames, :] lowerCAmelCase__ = mel[idx_back : idx_back + chunk_frames, :] lowerCAmelCase__ = torch.tensor(mel[None, None, :] ) lowerCAmelCase__ = torch.nn.functional.interpolate( lowerCamelCase_ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=lowerCamelCase_ ) lowerCAmelCase__ = 
mel_shrink[0][0].numpy() lowerCAmelCase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowerCAmelCase__ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCAmelCase__ = len(lowerCamelCase_ ) - max_length lowerCAmelCase__ = np.random.randint(0 , overflow + 1 ) lowerCAmelCase__ = waveform[idx : idx + max_length] lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters ) lowerCAmelCase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCAmelCase__ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCAmelCase__ = np.stack([mel, mel, mel, mel] , axis=0 ) lowerCAmelCase__ = False else: lowerCAmelCase__ = self._random_mel_fusion(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = True else: raise NotImplementedError(F"""data_truncating {truncation} not implemented""" ) else: lowerCAmelCase__ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCAmelCase__ = int(max_length / len(lowerCamelCase_ ) ) lowerCAmelCase__ = np.stack(np.tile(lowerCamelCase_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCAmelCase__ = int(max_length / len(lowerCamelCase_ ) ) lowerCAmelCase__ = np.stack(np.tile(lowerCamelCase_ , lowerCamelCase_ ) ) lowerCAmelCase__ = np.pad(lowerCamelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters ) lowerCAmelCase__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: lowerCAmelCase__ = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> BatchFeature: lowerCAmelCase__ = truncation if truncation is not None else self.truncation lowerCAmelCase__ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) lowerCAmelCase__ = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) lowerCAmelCase__ = is_batched_numpy or ( isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase__ = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ): lowerCAmelCase__ = np.asarray(lowerCamelCase_ , dtype=np.floataa ) elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase__ = [np.asarray(lowerCamelCase_ )] # convert to mel spectrogram, truncate and pad if needed. lowerCAmelCase__ = [ self._get_input_mel(lowerCamelCase_ , max_length if max_length else self.nb_max_samples , lowerCamelCase_ , lowerCamelCase_ ) for waveform in raw_speech ] lowerCAmelCase__ = [] lowerCAmelCase__ = [] for mel, longer in padded_inputs: input_mel.append(lowerCamelCase_ ) is_longer.append(lowerCamelCase_ ) if truncation == "fusion" and sum(lowerCamelCase_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCAmelCase__ = np.random.randint(0 , len(lowerCamelCase_ ) ) lowerCAmelCase__ = True if isinstance(input_mel[0] , lowerCamelCase_ ): lowerCAmelCase__ = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCAmelCase__ = [[longer] for longer in is_longer] lowerCAmelCase__ = {'''input_features''': input_mel, '''is_longer''': is_longer} lowerCAmelCase__ = BatchFeature(lowerCamelCase_ ) if return_tensors is not None: lowerCAmelCase__ = input_features.convert_to_tensors(lowerCamelCase_ ) return input_features
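A numpy sketch of the chunk selection inside _random_mel_fusion above: candidate start frames are split into thirds and one random start is drawn per third (the entry additionally guards empty ranges for very short audio):

import numpy as np

total_frames, chunk_frames = 1001, 301
candidates = np.array_split(np.arange(total_frames - chunk_frames + 1), 3)
starts = [int(np.random.choice(r)) for r in candidates]  # front / middle / back chunk starts
print(starts)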
90
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''T''') class __snake_case ( Generic[T]): def __init__( self : int , __lowerCAmelCase : T ): """simple docstring""" _lowerCamelCase : Optional[int] = data _lowerCamelCase : Node[T] | None = None def __str__( self : Optional[Any] ): """simple docstring""" return f'''{self.data}''' class __snake_case ( Generic[T]): def __init__( self : int ): """simple docstring""" _lowerCamelCase : Node[T] | None = None def __iter__( self : str ): """simple docstring""" _lowerCamelCase : List[str] = self.top while node: yield node.data _lowerCamelCase : Any = node.next def __str__( self : int ): """simple docstring""" return "->".join([str(__lowerCAmelCase ) for item in self] ) def __len__( self : int ): """simple docstring""" return len(tuple(iter(self ) ) ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return self.top is None def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T ): """simple docstring""" _lowerCamelCase : Tuple = Node(__lowerCAmelCase ) if not self.is_empty(): _lowerCamelCase : Optional[int] = self.top _lowerCamelCase : List[str] = node def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" if self.is_empty(): raise IndexError('''pop from empty stack''' ) assert isinstance(self.top , __lowerCAmelCase ) _lowerCamelCase : Any = self.top _lowerCamelCase : Any = self.top.next return pop_node.data def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self.is_empty(): raise IndexError('''peek from empty stack''' ) assert self.top is not None return self.top.data def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : List[str] = None if __name__ == "__main__": from doctest import testmod testmod()
83
0
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
690
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
690
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler') class _snake_case : def __init__( self , a , a , a = True , a = False) -> Optional[int]: SCREAMING_SNAKE_CASE = scheduler SCREAMING_SNAKE_CASE = optimizers if isinstance(a , (list, tuple)) else [optimizers] SCREAMING_SNAKE_CASE = split_batches SCREAMING_SNAKE_CASE = step_with_optimizer SCREAMING_SNAKE_CASE = GradientState() def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> List[Any]: if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*a , **a) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*a , **a) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step SCREAMING_SNAKE_CASE = AcceleratorState().num_processes for _ in range(a): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps'): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*a , **a) else: self.scheduler.step(*a , **a) def SCREAMING_SNAKE_CASE__ ( self) -> Any: return self.scheduler.get_last_lr() def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: return self.scheduler.state_dict() def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]: self.scheduler.load_state_dict(a) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: return self.scheduler.get_lr() def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> str: return self.scheduler.print_lr(*a , **a)
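A minimal torch sketch of the ordering this wrapper coordinates: the scheduler advances only after the optimizer has actually stepped:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5)

for step in range(6):
    optimizer.zero_grad()
    model(torch.randn(1, 4)).sum().backward()
    optimizer.step()    # must precede scheduler.step(), which is what the wrapper checks for
    scheduler.step()
    print(step, scheduler.get_last_lr())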
73
'''simple docstring''' from PIL import Image def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Image: '''simple docstring''' def brightness(__UpperCAmelCase ) -> float: return 128 + level + (c - 128) if not -2_5_5.0 <= level <= 2_5_5.0: raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' ) return img.point(__UpperCAmelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 a : int = change_brightness(img, 100) a.save('image_data/lena_brightness.png', format='png')
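A self-contained sketch of the same Image.point brightness shift on a synthetic image, avoiding the file dependency above:

from PIL import Image

def change_brightness(img, level):
    # same transfer function as the entry: 128 + level + (c - 128)
    return img.point(lambda c: 128 + level + (c - 128))

gray = Image.new("L", (4, 4), color=100)
brighter = change_brightness(gray, 50)
assert brighter.getpixel((0, 0)) == 150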
640
0
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( a__ ): lowerCAmelCase__ : int = ["""pixel_values"""] def __init__( self : Optional[int] , _UpperCAmelCase : Dict = True , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : Dict = PILImageResampling.BICUBIC , _UpperCAmelCase : Union[str, Any] = True , _UpperCAmelCase : Any = None , _UpperCAmelCase : str = True , _UpperCAmelCase : str = 1 / 2_55 , _UpperCAmelCase : List[Any] = True , _UpperCAmelCase : Optional[int] = IMAGENET_DEFAULT_MEAN , _UpperCAmelCase : Union[str, Any] = IMAGENET_DEFAULT_STD , **_UpperCAmelCase : Union[str, Any] , ) -> None: """simple docstring""" super().__init__(**lowercase__ ) __lowercase = size if size is not None else {'''shortest_edge''': 2_24} __lowercase = get_size_dict(lowercase__ , default_to_square=lowercase__ ) __lowercase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __lowercase = get_size_dict(lowercase__ , param_name='crop_size' ) __lowercase = do_resize __lowercase = size __lowercase = resample __lowercase = do_center_crop __lowercase = crop_size __lowercase = do_rescale __lowercase = rescale_factor __lowercase = do_normalize __lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a__ ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict = PILImageResampling.BICUBIC , _UpperCAmelCase : List[Any] = None , **_UpperCAmelCase : Any , ) -> np.ndarray: """simple docstring""" __lowercase = get_size_dict(lowercase__ , default_to_square=lowercase__ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: __lowercase = int((2_56 / 2_24) * size['shortest_edge'] ) __lowercase = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ ) __lowercase = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}""" ) return resize( lowercase__ , size=(size_dict['height'], size_dict['width']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def a__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ) -> np.ndarray: """simple docstring""" __lowercase = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}""" ) return center_crop(lowercase__ , size=(size['height'], size['width']) , data_format=lowercase__ , **lowercase__ ) def a__ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] = None , **_UpperCAmelCase : Tuple , ) -> np.ndarray: """simple docstring""" return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] = None , **_UpperCAmelCase : Union[str, Any] , ) -> np.ndarray: """simple docstring""" return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def a__ ( self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any = None , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : List[Any] = None , _UpperCAmelCase : List[Any] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Any = None , _UpperCAmelCase : Any = None , _UpperCAmelCase : str = None , _UpperCAmelCase : int = None , _UpperCAmelCase : str = None , _UpperCAmelCase : Union[str, Any] = ChannelDimension.FIRST , **_UpperCAmelCase : Optional[int] , ) -> BatchFeature: """simple docstring""" __lowercase = do_resize if do_resize is not None else self.do_resize __lowercase = resample if resample is not None else self.resample __lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase = do_rescale if do_rescale is not None else self.do_rescale __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = image_mean if image_mean is not None else self.image_mean __lowercase = image_std if image_std is not None else self.image_std __lowercase = size if size is not None else self.size __lowercase = get_size_dict(lowercase__ , default_to_square=lowercase__ ) __lowercase = crop_size if crop_size is not None else self.crop_size __lowercase = get_size_dict(lowercase__ , param_name='crop_size' ) __lowercase = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__lowercase = [to_numpy_array(lowercase__ ) for image in images] if do_resize: __lowercase = [self.resize(lowercase__ , lowercase__ , lowercase__ ) for image in images] if do_center_crop: __lowercase = [self.center_crop(lowercase__ , lowercase__ ) for image in images] if do_rescale: __lowercase = [self.rescale(lowercase__ , lowercase__ ) for image in images] if do_normalize: __lowercase = [self.normalize(lowercase__ , lowercase__ , lowercase__ ) for image in images] __lowercase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] __lowercase = {'''pixel_values''': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
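A small sketch of the shortest-edge arithmetic used by resize above; the 256/224 inflation is taken from the entry, while the helper itself is illustrative:

def resize_output_size(height, width, shortest_edge):
    # scale so the shorter image side lands on shortest_edge
    scale = shortest_edge / min(height, width)
    return int(round(height * scale)), int(round(width * scale))

size = int((256 / 224) * 224)  # the processor inflates the requested shortest edge first
print(resize_output_size(480, 640, size))  # (256, 341)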
703
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, 
TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
688
0
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = IFPipeline lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self._get_dummy_components() def UpperCAmelCase__ ( self : Optional[int] , _A : Dict , _A : Dict=0 ): """simple docstring""" if str(_A ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(_A ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self._test_save_load_local() def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : int = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=_A , tokenizer=_A ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() __SCREAMING_SNAKE_CASE : 
List[Any] = None __SCREAMING_SNAKE_CASE : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(_A , _A , _A , _A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img __SCREAMING_SNAKE_CASE : Dict = IFImgaImgPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(_A , _A , _A , _A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting __SCREAMING_SNAKE_CASE : int = IFInpaintingPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(_A , _A , _A , _A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str , _A : Optional[Any] , _A : Tuple , _A : List[str] ): """simple docstring""" _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , num_inference_steps=2 , generator=_A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : int = output.images[0] assert image.shape == (64, 64, 3) __SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 __SCREAMING_SNAKE_CASE : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(_A , _A ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : int = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : int = output.images[0] assert image.shape == (256, 256, 3) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(_A , _A ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : List[Any] , _A : Optional[int] , _A : Union[str, Any] ): """simple docstring""" _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[Any] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , num_inference_steps=2 , generator=_A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (64, 64, 3) __SCREAMING_SNAKE_CASE : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __SCREAMING_SNAKE_CASE : Any = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(_A , _A ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , original_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : List[Any] = output.images[0] assert image.shape == (256, 256, 3) __SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] , _A : List[str] , _A : List[str] , _A : Any , _A : Dict ): """simple docstring""" _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_A ) __SCREAMING_SNAKE_CASE : int = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Tuple = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , mask_image=_A , num_inference_steps=2 , generator=_A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : List[Any] = output.images[0] assert image.shape == (64, 64, 3) __SCREAMING_SNAKE_CASE : int = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __SCREAMING_SNAKE_CASE : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(_A , _A ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , mask_image=_A , original_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) __SCREAMING_SNAKE_CASE : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(_A , _A ) def a__ ( ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
74
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class snake_case_ ( a ): '''simple docstring''' __UpperCamelCase = 'EncodecFeatureExtractor' __UpperCamelCase = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self, A_, A_ ) -> Optional[int]: super().__init__(A_, A_ ) UpperCAmelCase__ =self.feature_extractor UpperCAmelCase__ =False def __UpperCAmelCase ( self, A_=None, A_=None, A_=True ) -> Union[str, Any]: return self.tokenizer.get_decoder_prompt_ids(task=A_, language=A_, no_timestamps=A_ ) def __call__( self, *A_, **A_ ) -> Any: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A_, **A_ ) UpperCAmelCase__ =kwargs.pop("audio", A_ ) UpperCAmelCase__ =kwargs.pop("sampling_rate", A_ ) UpperCAmelCase__ =kwargs.pop("text", A_ ) if len(A_ ) > 0: UpperCAmelCase__ =args[0] UpperCAmelCase__ =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: UpperCAmelCase__ =self.tokenizer(A_, **A_ ) if audio is not None: UpperCAmelCase__ =self.feature_extractor(A_, *A_, sampling_rate=A_, **A_ ) if audio is None: return inputs elif text is None: return audio_inputs else: UpperCAmelCase__ =audio_inputs["input_values"] if "padding_mask" in audio_inputs: UpperCAmelCase__ =audio_inputs["padding_mask"] return inputs def __UpperCAmelCase ( self, *A_, **A_ ) -> Dict: UpperCAmelCase__ =kwargs.pop("audio", A_ ) UpperCAmelCase__ =kwargs.pop("padding_mask", A_ ) if len(A_ ) > 0: UpperCAmelCase__ =args[0] UpperCAmelCase__ =args[1:] if audio_values is not None: return self._decode_audio(A_, padding_mask=A_ ) else: return self.tokenizer.batch_decode(*A_, **A_ ) def __UpperCAmelCase ( self, *A_, **A_ ) -> int: return self.tokenizer.decode(*A_, **A_ ) def __UpperCAmelCase ( self, A_, A_ = None ) -> List[np.ndarray]: UpperCAmelCase__ =to_numpy(A_ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =audio_values.shape if padding_mask is None: return list(A_ ) UpperCAmelCase__ =to_numpy(A_ ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) UpperCAmelCase__ =seq_len - padding_mask.shape[-1] UpperCAmelCase__ =1 - self.feature_extractor.padding_value UpperCAmelCase__ =np.pad(A_, ((0, 0), (0, difference)), "constant", constant_values=A_ ) UpperCAmelCase__ =audio_values.tolist() for i in range(A_ ): UpperCAmelCase__ =np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] UpperCAmelCase__ =sliced_audio.reshape(A_, -1 ) return audio_values
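A numpy sketch of the padding-mask trimming performed in _decode_audio above: padded tail samples are dropped per example (here 0 plays the role of the feature extractor's padding_value):

import numpy as np

audio = np.array([[0.1, 0.2, 0.3, 0.0, 0.0],
                  [0.4, 0.5, 0.0, 0.0, 0.0]])
mask = np.array([[1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]])

trimmed = [row[m.astype(bool)] for row, m in zip(audio, mask)]
print([t.shape for t in trimmed])  # [(3,), (2,)]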
625
0
from typing import List import numpy as np def a ( lowerCamelCase_ ): '''simple docstring''' lowercase__ = {key: len(snake_case_ ) for key, value in gen_kwargs.items() if isinstance(snake_case_ , snake_case_ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowercase__ = max(lists_lengths.values() , default=0 ) return max(1 , snake_case_ ) def a ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' lowercase__ = [] for group_idx in range(snake_case_ ): lowercase__ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowercase__ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowercase__ = range(snake_case_ , start + num_shards_to_add ) shards_indices_per_group.append(snake_case_ ) return shards_indices_per_group def a ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' lowercase__ = _number_of_shards_in_gen_kwargs(snake_case_ ) if num_shards == 1: return [dict(snake_case_ )] else: lowercase__ = _distribute_shards(num_shards=snake_case_ , max_num_jobs=snake_case_ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(snake_case_ , snake_case_ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(snake_case_ ) ) ] def a ( lowerCamelCase_ ): '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , snake_case_ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' lowercase__ = {len(snake_case_ ) for value in gen_kwargs.values() if isinstance(snake_case_ , snake_case_ )} lowercase__ = {} for size in list_sizes: lowercase__ = list(range(snake_case_ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowercase__ = dict(snake_case_ ) for key, value in shuffled_kwargs.items(): if isinstance(snake_case_ , snake_case_ ): lowercase__ = [value[i] for i in indices_per_size[len(snake_case_ )]] return shuffled_kwargs
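A plainly named sketch of the shard distribution logic above: shards go into contiguous, near-equal groups, with the remainder handed to the first groups:

def distribute_shards(num_shards, max_num_jobs):
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        n = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if n == 0:
            break
        groups.append(range(start, start + n))
        start += n
    return groups

print(distribute_shards(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]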
719
from functools import reduce A__ : Union[str, Any] = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def a ( lowerCamelCase_ = N ): '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) ) for i in range(len(lowerCamelCase_ ) - 12 ) ) if __name__ == "__main__": print(F"{solution() = }")
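The same sliding-window idea on a short digit string, with math.prod in place of reduce:

from math import prod

digits = "7316717653133062491922511"
window = 4
best = max(
    prod(int(d) for d in digits[i : i + window])
    for i in range(len(digits) - window + 1)
)
print(best)  # largest product of 4 adjacent digits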
671
0
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time snake_case = Lock() def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(snake_case__ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() _lowercase = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left _lowercase = min(snake_case__ , snake_case__ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(snake_case__ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() _lowercase = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right _lowercase = max(snake_case__ , snake_case__ ) # after all swaps are performed, send the values back to main result_pipe[1].send(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict: _lowercase = [] _lowercase = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop _lowercase = Pipe() _lowercase = Pipe() process_array_.append( Process( target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) _lowercase = temp_rs _lowercase = temp_rr for i in range(1 , len(snake_case__ ) - 1 ): _lowercase = Pipe() _lowercase = Pipe() process_array_.append( Process( target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) _lowercase = temp_rs _lowercase = temp_rr process_array_.append( Process( target=snake_case__ , args=( len(snake_case__ ) - 1, arr[len(snake_case__ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(snake_case__ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(snake_case__ ) ): _lowercase = result_pipe[p][0].recv() process_array_[p].join() return arr def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: _lowercase = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*snake_case__ ) _lowercase = odd_even_transposition(snake_case__ ) print('Sorted List\n' ) print(*snake_case__ ) if __name__ == "__main__": main()
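A single-process sketch of odd-even transposition sort, keeping the n-phase compare-exchange schedule but dropping the pipes and locks:

def odd_even_transposition(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):  # n phases guarantee a sorted result
        # even phases compare pairs (0,1),(2,3),...; odd phases compare (1,2),(3,4),...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]))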
67
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def a_ ( _A , _A ) -> List[Any]: """simple docstring""" snake_case__ = old_name if "patch_embed" in old_name: snake_case__ , snake_case__ , snake_case__ = old_name.split('.' ) if layer == "0": snake_case__ = old_name.replace('0' , 'convolution1' ) elif layer == "1": snake_case__ = old_name.replace('1' , 'batchnorm_before' ) elif layer == "3": snake_case__ = old_name.replace('3' , 'convolution2' ) else: snake_case__ = old_name.replace('4' , 'batchnorm_after' ) if "network" in old_name and re.search(R'\d\.\d' , _A ): snake_case__ = R'\b\d{2}\b' if bool(re.search(_A , _A ) ): snake_case__ = re.search(R'\d\.\d\d.' , _A ).group() else: snake_case__ = re.search(R'\d\.\d.' , _A ).group() if int(match[0] ) < 6: snake_case__ = old_name.replace(_A , '' ) snake_case__ = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] ) snake_case__ = 'intermediate_stages.' + trimmed_name else: snake_case__ = old_name.replace(_A , '' ) if int(match[2] ) < num_meta4D_last_stage: snake_case__ = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] ) else: snake_case__ = str(int(match[2] ) - num_meta4D_last_stage ) snake_case__ = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index ) if "norm1" in old_name: snake_case__ = trimmed_name.replace('norm1' , 'layernorm1' ) elif "norm2" in old_name: snake_case__ = trimmed_name.replace('norm2' , 'layernorm2' ) elif "fc1" in old_name: snake_case__ = trimmed_name.replace('fc1' , 'linear_in' ) elif "fc2" in old_name: snake_case__ = trimmed_name.replace('fc2' , 'linear_out' ) snake_case__ = 'last_stage.' + trimmed_name elif "network" in old_name and re.search(R'.\d.' , _A ): snake_case__ = old_name.replace('network' , 'intermediate_stages' ) if "fc" in new_name: snake_case__ = new_name.replace('fc' , 'convolution' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): snake_case__ = new_name.replace('norm1' , 'batchnorm_before' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): snake_case__ = new_name.replace('norm2' , 'batchnorm_after' ) if "proj" in new_name: snake_case__ = new_name.replace('proj' , 'projection' ) if "dist_head" in new_name: snake_case__ = new_name.replace('dist_head' , 'distillation_classifier' ) elif "head" in new_name: snake_case__ = new_name.replace('head' , 'classifier' ) elif "patch_embed" in new_name: snake_case__ = 'efficientformer.' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": snake_case__ = new_name.replace('norm' , 'layernorm' ) snake_case__ = 'efficientformer.' + new_name else: snake_case__ = 'efficientformer.encoder.' 
+ new_name return new_name def a_ ( _A , _A ) -> Optional[Any]: """simple docstring""" for key in checkpoint.copy().keys(): snake_case__ = checkpoint.pop(_A ) snake_case__ = val return checkpoint def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case__ = Image.open(requests.get(_A , stream=_A ).raw ) return image def a_ ( _A , _A , _A , _A ) -> Optional[Any]: """simple docstring""" snake_case__ = torch.load(_A , map_location='cpu' )['model'] snake_case__ = EfficientFormerConfig.from_json_file(_A ) snake_case__ = EfficientFormerForImageClassificationWithTeacher(_A ) snake_case__ = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] ) snake_case__ = config.depths[-1] - config.num_metaad_blocks + 1 snake_case__ = convert_torch_checkpoint(_A , _A ) model.load_state_dict(_A ) model.eval() snake_case__ = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } # prepare image snake_case__ = prepare_img() snake_case__ = 256 snake_case__ = 224 snake_case__ = EfficientFormerImageProcessor( size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , ) snake_case__ = processor(images=_A , return_tensors='pt' ).pixel_values # original processing pipeline snake_case__ = Compose( [ Resize(_A , interpolation=pillow_resamplings['bicubic'] ), CenterCrop(_A ), ToTensor(), Normalize(_A , _A ), ] ) snake_case__ = image_transforms(_A ).unsqueeze(0 ) assert torch.allclose(_A , _A ) snake_case__ = model(_A ) snake_case__ = outputs.logits snake_case__ = (1, 1000) if "l1" in model_name: snake_case__ = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , _A , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: snake_case__ = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , _A , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: snake_case__ = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' ) # Save Checkpoints Path(_A ).mkdir(exist_ok=_A ) model.save_pretrained(_A ) print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) processor.save_pretrained(_A ) print(f'''Processor successfuly saved at {pytorch_dump_path}''' ) if push_to_hub: print('Pushing model to the hub...' 
) model.push_to_hub( repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add model' , use_temp_dir=_A , ) processor.push_to_hub( repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add image processor' , use_temp_dir=_A , ) if __name__ == "__main__": __UpperCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) __UpperCamelCase : Dict = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
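A tiny sketch of the key-renaming pattern the conversion above applies: every state-dict key is passed through a rename function (the rule shown is one of the patch_embed rewrites from the entry):

def rename_keys(state_dict, rename):
    return {rename(k): v for k, v in state_dict.items()}

renamed = rename_keys(
    {"patch_embed.0.weight": 1},
    lambda k: k.replace("patch_embed.0", "patch_embed.convolution1"),
)
print(renamed)  # {'patch_embed.convolution1.weight': 1}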
328
0
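The conversion script above ends with a pattern that recurs across these checkpoint porters: pop every key out of the original state dict, translate it to the new naming scheme, and reinsert it. Below is a minimal, self-contained sketch of that pattern; the rename rules are invented placeholders for illustration, not the actual EfficientFormer mapping.

import re

def rename_key(old_name: str) -> str:
    # Placeholder rules only -- the real script applies a much longer rule list.
    new_name = old_name.replace("network.", "encoder.")
    new_name = re.sub(r"patch_embed", "patch_embeddings", new_name)
    return new_name

def convert_state_dict(state_dict: dict) -> dict:
    # Pop each key and reinsert its value under the converted name.
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    return state_dict

if __name__ == "__main__":
    demo = {"network.0.patch_embed.weight": 1, "network.0.patch_embed.bias": 2}
    print(convert_state_dict(demo))
    # {'encoder.0.patch_embeddings.weight': 1, 'encoder.0.patch_embeddings.bias': 2}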
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
717
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
184
0
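A quick usage sketch for the non-adjacent-sum routine above (the sample inputs are arbitrary):

# maximum_non_adjacent_sum as defined above
print(maximum_non_adjacent_sum([1, 2, 4, 5]))     # 7  (2 + 5)
print(maximum_non_adjacent_sum([3, 7, 4, 6, 5]))  # 13 (7 + 6)
print(maximum_non_adjacent_sum([]))               # 0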
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
9
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    start = len(cc_number) - 2
    for i in range(start, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
166
0
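To exercise the Luhn check above: 79927398713 is the textbook valid Luhn number, and flipping its last digit breaks the checksum.

# luhn_validation as defined above
assert luhn_validation("79927398713") is True
assert luhn_validation("79927398714") is False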
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 _lowerCamelCase = get_tests_dir('''fixtures/dummy-config.json''') class snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self :str ): __SCREAMING_SNAKE_CASE : Tuple = 0 def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ): self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) ) def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ): __SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained('''bert-base-uncased''' ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :int ): __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :Dict ): __SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ): __SCREAMING_SNAKE_CASE : Any = AutoConfig.for_model('''roberta''' ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :List[str] ): with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_lowerCamelCase , '''fake-roberta''' ) os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) with open(os.path.join(_lowerCamelCase , '''config.json''' ) , '''w''' ) as f: f.write(json.dumps({} ) ) __SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(_lowerCamelCase ) self.assertEqual(type(_lowerCamelCase ) , _lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :Dict ): try: AutoConfig.register('''custom''' , _lowerCamelCase ) # Wrong model type will raise an error with self.assertRaises(_lowerCamelCase ): AutoConfig.register('''model''' , _lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowerCamelCase ): AutoConfig.register('''bert''' , _lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API __SCREAMING_SNAKE_CASE : Any = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ): with self.assertRaisesRegex( _lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained('''bert-base''' ) def SCREAMING_SNAKE_CASE_ ( self :Tuple ): with self.assertRaisesRegex( _lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE_ ( self :Dict ): with self.assertRaisesRegex( _lowerCamelCase , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ): __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' ) def SCREAMING_SNAKE_CASE_ ( self :str ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_lowerCamelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_lowerCamelCase ): __SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowerCamelCase ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowerCamelCase ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase ) self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' ) def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ): class snake_case ( __UpperCAmelCase ): lowerCamelCase__ = '''new-model''' try: AutoConfig.register('''new-model''' , _lowerCamelCase ) # If remote code is not set, the default is to use local __SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote code is disabled, we load the local one. __SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowerCamelCase ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote is enabled, we load from the Hub __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowerCamelCase ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
401
"""simple docstring""" _lowerCamelCase = ''' # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git ''' _lowerCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _lowerCamelCase = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
401
1
from math import pi, sqrt


def gamma(num: float) -> float:
    """Compute gamma(num) for positive integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
566
'''simple docstring''' import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() snake_case : Any = logging.get_logger(__name__) snake_case : Union[str, Any] = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS} def lowercase__ ( __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ): '''simple docstring''' if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' ) if tokenizer_name is None: __lowercase = TOKENIZER_CLASSES else: __lowercase = {tokenizer_name: getattr(__UpperCamelCase , tokenizer_name + """Fast""" )} logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' ) for tokenizer_name in tokenizer_names: __lowercase = TOKENIZER_CLASSES[tokenizer_name] __lowercase = True if checkpoint_name is None: __lowercase = list(tokenizer_class.max_model_input_sizes.keys() ) else: __lowercase = [checkpoint_name] logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' ) for checkpoint in checkpoint_names: logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' ) # Load tokenizer __lowercase = tokenizer_class.from_pretrained(__UpperCamelCase , force_download=__UpperCamelCase ) # Save fast tokenizer logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' ) # For organization names we create sub-directories if "/" in checkpoint: __lowercase , __lowercase = checkpoint.split("""/""" ) __lowercase = os.path.join(__UpperCamelCase , __UpperCamelCase ) elif add_prefix: __lowercase = checkpoint __lowercase = dump_path else: __lowercase = None __lowercase = dump_path logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: __lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] __lowercase = file_path.split(__UpperCamelCase )[-1][0] if next_char == "/": __lowercase = os.path.join(__UpperCamelCase , __UpperCamelCase ) __lowercase = None logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) __lowercase = tokenizer.save_pretrained( __UpperCamelCase , legacy_format=__UpperCamelCase , filename_prefix=__UpperCamelCase ) logger.info(F'''=> File names {file_names}''' ) for file_name in file_names: if not file_name.endswith("""tokenizer.json""" ): os.remove(__UpperCamelCase ) logger.info(F'''=> removing {file_name}''' ) if __name__ == "__main__": snake_case : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.' ) parser.add_argument( '--tokenizer_name', default=None, type=str, help=( F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """ 'download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--checkpoint_name', default=None, type=str, help='Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.', ) parser.add_argument( '--force_download', action='store_true', help='Re-download checkpoints.', ) snake_case : Tuple = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
566
1
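A few spot checks for the recursive gamma implementation above, using Γ(n) = (n - 1)! for integers and Γ(1/2) = √π for the half-integer base case:

from math import isclose, pi, sqrt

# gamma as defined above
assert gamma(5) == 24.0                                 # Γ(5) = 4! = 24
assert gamma(0.5) == sqrt(pi)                           # half-integer base case
assert isclose(gamma(3.5), 2.5 * 1.5 * 0.5 * sqrt(pi))  # Γ(7/2) via the recursion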
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(point1: Point3d, point2: Point3d, point3: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(point1, point2)
    ac = create_vector(point1, point3)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
702
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder a = """base_with_context""" def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : List[str] ): """simple docstring""" _lowerCAmelCase :Dict = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) _lowerCAmelCase :Union[str, Any] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__magic_name__ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase :Optional[int] = weights[f"""layers_{lyr_num}"""] _lowerCAmelCase :Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _lowerCAmelCase :Any = ly_weight['attention'] _lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _lowerCAmelCase :Optional[int] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Tuple ): """simple docstring""" _lowerCAmelCase :int = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) _lowerCAmelCase :Union[str, Any] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__magic_name__ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCAmelCase :Any = weights[f"""layers_{lyr_num}"""] _lowerCAmelCase :str = ly_weight['attention'] _lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase :Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _lowerCAmelCase :int = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _lowerCAmelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ): """simple docstring""" 
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) _lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) _lowerCAmelCase :Dict = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__magic_name__ ) _lowerCAmelCase :List[Any] = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCAmelCase :int = weights[f"""layers_{lyr_num}"""] _lowerCAmelCase :Tuple = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) _lowerCAmelCase :Tuple = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) _lowerCAmelCase :Tuple = ly_weight['self_attention'] _lowerCAmelCase :Dict = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase :List[Any] = ly_weight['MultiHeadDotProductAttention_0'] _lowerCAmelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _lowerCAmelCase :Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) _lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _lowerCAmelCase :int = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) _lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) _lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def UpperCamelCase_( __magic_name__ : Optional[Any] ): """simple docstring""" _lowerCAmelCase :Dict = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCAmelCase :Tuple = jnp.tree_util.tree_map(onp.array , __magic_name__ ) _lowerCAmelCase :List[str] = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] _lowerCAmelCase :Any = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) _lowerCAmelCase :Tuple = inference.parse_training_gin_file(__magic_name__ , __magic_name__ ) _lowerCAmelCase :List[Any] = inference.InferenceModel(args.checkpoint_path , __magic_name__ ) _lowerCAmelCase :Dict = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) _lowerCAmelCase :Dict = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) _lowerCAmelCase :Any = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) _lowerCAmelCase :Any = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _lowerCAmelCase :str = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , __magic_name__ ) _lowerCAmelCase :Optional[int] = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , __magic_name__ ) _lowerCAmelCase :List[str] = load_decoder(ta_checkpoint['target']['decoder'] , __magic_name__ ) _lowerCAmelCase :Tuple = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) _lowerCAmelCase :Union[str, Any] = SpectrogramDiffusionPipeline( notes_encoder=__magic_name__ , continuous_encoder=__magic_name__ , decoder=__magic_name__ , scheduler=__magic_name__ , melgan=__magic_name__ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help="""Path to the original jax model checkpoint.""", ) a = parser.parse_args() main(args)
382
0
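Usage sketch for the collinearity helpers above; the first triple of points lies on the line x = y = z, the second does not:

# are_collinear as defined above
print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True: cross product of AB and AC is the zero vector
print(are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3)))  # False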
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[Any] , a_ : Optional[int] , a_ : Optional[int]=12 , a_ : Dict=7 , a_ : str=True , a_ : List[Any]=True , a_ : Any=True , a_ : List[str]=99 , a_ : int=32 , a_ : Any=32 , a_ : Union[str, Any]=2 , a_ : Optional[int]=4 , a_ : Any=37 , a_ : List[str]=0.1 , a_ : int=0.1 , a_ : str=512 , a_ : Optional[Any]=0.02 , a_ : List[Any]=0 , a_ : Any=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = projection_dim __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = dropout __snake_case = attention_dropout __snake_case = max_position_embeddings __snake_case = initializer_range __snake_case = scope __snake_case = bos_token_id def A ( self : Tuple ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: __snake_case = input_mask.numpy() __snake_case , __snake_case = input_mask.shape __snake_case = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(a_ ): __snake_case = 1 __snake_case = 0 __snake_case = self.get_config() return config, input_ids, tf.convert_to_tensor(a_ ) def A ( self : List[str] ): """simple docstring""" return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def A ( self : Optional[int] , a_ : Union[str, Any] , a_ : List[str] , a_ : str ): """simple docstring""" __snake_case = TFBlipTextModel(config=a_ ) __snake_case = model(a_ , attention_mask=a_ , training=a_ ) __snake_case = model(a_ , training=a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : str ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = (TFBlipTextModel,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = 
False def A ( self : Optional[Any] ): """simple docstring""" __snake_case = BlipTextModelTester(self ) __snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 ) def A ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def A ( self : Tuple ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def A ( self : str ): """simple docstring""" pass def A ( self : List[Any] ): """simple docstring""" pass @unittest.skip(reason="Blip does not use inputs_embeds" ) def A ( self : str ): """simple docstring""" pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" ) def A ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" ) def A ( self : int ): """simple docstring""" pass @slow def A ( self : Optional[int] ): """simple docstring""" for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = TFBlipTextModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def A ( self : Optional[int] , a_ : int=True ): """simple docstring""" super().test_pt_tf_model_equivalence(allow_missing_keys=a_ )
69
'''simple docstring''' from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """SpeechT5FeatureExtractor""" __SCREAMING_SNAKE_CASE = """SpeechT5Tokenizer""" def __init__( self : List[Any] , a_ : str , a_ : str ): """simple docstring""" super().__init__(a_ , a_ ) def __call__( self : Dict , *a_ : Tuple , **a_ : List[str] ): """simple docstring""" __snake_case = kwargs.pop("audio" , a_ ) __snake_case = kwargs.pop("text" , a_ ) __snake_case = kwargs.pop("text_target" , a_ ) __snake_case = kwargs.pop("audio_target" , a_ ) __snake_case = kwargs.pop("sampling_rate" , a_ ) if audio is not None and text is not None: raise ValueError( "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" ) if audio_target is not None and text_target is not None: raise ValueError( "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." ) if audio is not None: __snake_case = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ ) elif text is not None: __snake_case = self.tokenizer(a_ , **a_ ) else: __snake_case = None if audio_target is not None: __snake_case = self.feature_extractor(audio_target=a_ , *a_ , sampling_rate=a_ , **a_ ) __snake_case = targets["input_values"] elif text_target is not None: __snake_case = self.tokenizer(a_ , **a_ ) __snake_case = targets["input_ids"] else: __snake_case = None if inputs is None: return targets if targets is not None: __snake_case = labels __snake_case = targets.get("attention_mask" ) if decoder_attention_mask is not None: __snake_case = decoder_attention_mask return inputs def A ( self : List[str] , *a_ : str , **a_ : Dict ): """simple docstring""" __snake_case = kwargs.pop("input_values" , a_ ) __snake_case = kwargs.pop("input_ids" , a_ ) __snake_case = kwargs.pop("labels" , a_ ) if input_values is not None and input_ids is not None: raise ValueError("Cannot process both `input_values` and `input_ids` inputs." ) if input_values is None and input_ids is None and labels is None: raise ValueError( "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." ) if input_values is not None: __snake_case = self.feature_extractor.pad(a_ , *a_ , **a_ ) elif input_ids is not None: __snake_case = self.tokenizer.pad(a_ , **a_ ) else: __snake_case = None if labels is not None: if "input_ids" in labels or (isinstance(a_ , a_ ) and "input_ids" in labels[0]): __snake_case = self.tokenizer.pad(a_ , **a_ ) __snake_case = targets["input_ids"] else: __snake_case = self.feature_extractor.feature_size __snake_case = self.feature_extractor.num_mel_bins __snake_case = self.feature_extractor.pad(a_ , *a_ , **a_ ) __snake_case = feature_size_hack __snake_case = targets["input_values"] else: __snake_case = None if inputs is None: return targets if targets is not None: __snake_case = labels __snake_case = targets.get("attention_mask" ) if decoder_attention_mask is not None: __snake_case = decoder_attention_mask return inputs def A ( self : List[str] , *a_ : Any , **a_ : List[str] ): """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def A ( self : Optional[int] , *a_ : Union[str, Any] , **a_ : str ): """simple docstring""" return self.tokenizer.decode(*a_ , **a_ )
69
1
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _a ( UpperCamelCase__ ): _lowercase : Union[str, Any] = '''''' _lowercase : List[Any] = '''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[DatasetInfo] = None , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: int , ) -> Any: """simple docstring""" super().__init__(self , **UpperCamelCase_ ) lowercase__ = repo_info lowercase__ = token lowercase__ = None def lowerCamelCase_ ( self: Optional[Any] ) -> Any: """simple docstring""" if self.dir_cache is None: lowercase__ = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes lowercase__ = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(UpperCamelCase_ ): {'''name''': str(UpperCamelCase_ ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: str = "rb" , **UpperCamelCase_: Tuple , ) -> Optional[Any]: """simple docstring""" if not isinstance(self.repo_info , UpperCamelCase_ ): raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}' ) lowercase__ = hf_hub_url(self.repo_info.id , UpperCamelCase_ , revision=self.repo_info.sha ) return fsspec.open( UpperCamelCase_ , mode=UpperCamelCase_ , headers=get_authentication_headers_for_url(UpperCamelCase_ , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def lowerCamelCase_ ( self: Dict , UpperCamelCase_: str , **UpperCamelCase_: Any ) -> List[Any]: """simple docstring""" self._get_dirs() lowercase__ = self._strip_protocol(UpperCamelCase_ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any]=False , **UpperCamelCase_: List[str] ) -> Tuple: """simple docstring""" self._get_dirs() lowercase__ = PurePosixPath(path.strip('''/''' ) ) lowercase__ = {} for p, f in self.dir_cache.items(): lowercase__ = PurePosixPath(p.strip('''/''' ) ) lowercase__ = p.parent if root == path: lowercase__ = f lowercase__ = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
429
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a : def __init__( self: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: int=32 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: str=10 , UpperCamelCase_: Tuple=[10, 20, 30, 40] , UpperCamelCase_: str=[1, 1, 2, 1] , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: str="relu" , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: Union[str, Any]=None , ) -> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = num_channels lowercase__ = embeddings_size lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_act lowercase__ = num_labels lowercase__ = scope lowercase__ = len(UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: int ) -> Dict: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: Any ) -> Optional[Any]: """simple docstring""" lowercase__ = TFRegNetModel(config=UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self: str , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ) -> Tuple: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFRegNetForImageClassification(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () _lowercase : str = ( 
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) _lowercase : Union[str, Any] = False _lowercase : Optional[int] = False _lowercase : List[Any] = False _lowercase : Dict = False _lowercase : List[str] = False def lowerCamelCase_ ( self: Dict ) -> Dict: """simple docstring""" lowercase__ = TFRegNetModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> Optional[int]: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def lowerCamelCase_ ( self: Optional[int] ) -> Any: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def lowerCamelCase_ ( self: Tuple ) -> Dict: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" pass def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" def check_hidden_states_output(UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Dict ): lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) , training=UpperCamelCase_ ) lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ = layer_type lowercase__ = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Dict: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any]={} ): lowercase__ = model(UpperCamelCase_ , return_dict=UpperCamelCase_ , **UpperCamelCase_ ) 
lowercase__ = model(UpperCamelCase_ , return_dict=UpperCamelCase_ , **UpperCamelCase_ ).to_tuple() def recursive_check(UpperCamelCase_: Optional[int] , UpperCamelCase_: Any ): if isinstance(UpperCamelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase_ , UpperCamelCase_ ): recursive_check(UpperCamelCase_ , UpperCamelCase_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(UpperCamelCase_ , UpperCamelCase_ ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}' ) , ) recursive_check(UpperCamelCase_ , UpperCamelCase_ ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , {'''output_hidden_states''': True} ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) check_equivalence(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , {'''output_hidden_states''': True} ) def lowerCamelCase_ ( self: List[Any] ) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: List[str] ) -> Dict: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = TFRegNetModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def _a ( ): """simple docstring""" lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _a ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Any ) -> Optional[int]: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self: Dict ) -> Dict: """simple docstring""" lowercase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' ) # forward pass lowercase__ = model(**UpperCamelCase_ , training=UpperCamelCase_ ) # verify the logits lowercase__ = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowercase__ = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
429
1
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = 'docs/source/en/_toctree.yml'


def clean_model_doc_toc(model_doc):
    """Clean up a model section of the table of contents: deduplicate entries and sort them."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc['local']] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s['title'].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]['title'] != 'API':
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]['title'] != 'Models':
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
527
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase_ ( snake_case_ , unittest.TestCase ): _lowerCAmelCase : str = KandinskyInpaintPipeline _lowerCAmelCase : List[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _lowerCAmelCase : int = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _lowerCAmelCase : List[str] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _lowerCAmelCase : Union[str, Any] = False @property def __lowercase ( self : str ): """simple docstring""" return 32 @property def __lowercase ( self : str ): """simple docstring""" return 32 @property def __lowercase ( self : Optional[int] ): """simple docstring""" return self.time_input_dim @property def __lowercase ( self : int ): """simple docstring""" return self.time_input_dim * 4 @property def __lowercase ( self : int ): """simple docstring""" return 1_00 @property def __lowercase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def __lowercase ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) SCREAMING_SNAKE_CASE : Any = MultilingualCLIP(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = text_encoder.eval() return text_encoder @property def __lowercase ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel(**lowerCAmelCase__ ) return model @property def __lowercase ( self : Optional[Any] ): """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", 
"AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowercase ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder SCREAMING_SNAKE_CASE : Tuple = self.dummy_tokenizer SCREAMING_SNAKE_CASE : int = self.dummy_unet SCREAMING_SNAKE_CASE : str = self.dummy_movq SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE : Optional[int] = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowercase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=0 ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ ) # create init_image SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Union[str, Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ).resize((2_56, 2_56) ) # create mask SCREAMING_SNAKE_CASE : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa ) SCREAMING_SNAKE_CASE : str = 0 if str(lowerCAmelCase__ ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(lowerCAmelCase__ ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : Tuple = { '''prompt''': '''horse''', '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def __lowercase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = '''cpu''' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : int = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : str = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) ) SCREAMING_SNAKE_CASE : int = output.images SCREAMING_SNAKE_CASE : int = pipe( **self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0] SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Dict = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : List[str] = np.array( [0.832_6919, 0.7379_0467, 0.2091_8581, 
0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __lowercase ( self : str ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): def __lowercase ( self : List[str] ): """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' ) SCREAMING_SNAKE_CASE : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa ) SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : Dict = '''a hat''' SCREAMING_SNAKE_CASE : str = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Dict = pipeline.to(lowerCAmelCase__ ) pipeline.set_progress_bar_config(disable=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Any = pipe_prior( lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : Tuple = pipeline( lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Tuple = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
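Note on the dummy inputs above: both inpainting tests pair a PIL image with a float mask of matching spatial size. A minimal sketch of building such a pair follows; the masked quadrant is an illustrative assumption, and the mask polarity (whether 1.0 marks pixels to keep or to repaint) has differed across diffusers versions, so the comments should be read as assumptions rather than guaranteed semantics.

import numpy as np
from PIL import Image

rng = np.random.default_rng(0)
init_image = (
    Image.fromarray((rng.random((64, 64, 3)) * 255).astype(np.uint8))
    .convert("RGB")
    .resize((256, 256))
)

mask = np.ones((64, 64), dtype=np.float32)  # assumed: 1.0 = keep pixel
mask[:32, :32] = 0.0                        # assumed: 0.0 = region to repaint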
527
1
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __snake_case : Dict = logging.get_logger(__name__) __snake_case : Union[str, Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class A__ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=None , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: str) -> Tuple: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__) if config is None: assert isinstance(self.model , UpperCamelCase__), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) __lowerCAmelCase : List[Any] = self.model.config else: __lowerCAmelCase : Union[str, Any] = config __lowerCAmelCase : List[Any] = data_args __lowerCAmelCase : str = self.config.tgt_vocab_size if isinstance(self.config , UpperCamelCase__) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" " padding..") if self.args.label_smoothing == 0: __lowerCAmelCase : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss __lowerCAmelCase : List[str] = label_smoothed_nll_loss def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: int) -> Dict: """simple docstring""" if self.optimizer is None: __lowerCAmelCase : str = ['''bias''', '''LayerNorm.weight'''] __lowerCAmelCase : Optional[int] = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0, }, ] __lowerCAmelCase : int = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: __lowerCAmelCase : str = Adafactor __lowerCAmelCase : Optional[Any] = {'''scale_parameter''': False, '''relative_step''': False} else: __lowerCAmelCase : Optional[int] = AdamW __lowerCAmelCase : Any = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } __lowerCAmelCase : Dict = self.args.learning_rate if self.sharded_ddp: __lowerCAmelCase : Any = OSS( params=UpperCamelCase__ , optim=UpperCamelCase__ , **UpperCamelCase__ , ) else: __lowerCAmelCase : Any = optimizer_cls(UpperCamelCase__ , **UpperCamelCase__) if self.lr_scheduler is None: __lowerCAmelCase : List[Any] = self._get_lr_scheduler(UpperCamelCase__) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.") def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": __lowerCAmelCase : List[str] = schedule_func(self.optimizer) elif self.args.lr_scheduler == "constant_w_warmup": __lowerCAmelCase : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps) else: __lowerCAmelCase : str = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCamelCase__) return scheduler def _SCREAMING_SNAKE_CASE ( self: Any) -> List[Any]: """simple docstring""" if isinstance(self.train_dataset , torch.utils.data.IterableDataset): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset) ) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]) -> Any: """simple docstring""" if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token __lowerCAmelCase : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__)[0] __lowerCAmelCase : List[str] = self.loss_fn(logits.view(-1 , logits.shape[-1]) , labels.view(-1)) else: # compute usual loss via models __lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase__ , labels=UpperCamelCase__ , 
use_cache=UpperCamelCase__)[:2] else: # compute label smoothed loss __lowerCAmelCase : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__)[0] __lowerCAmelCase : int = torch.nn.functional.log_softmax(UpperCamelCase__ , dim=-1) __lowerCAmelCase : int = self.loss_fn(UpperCamelCase__ , UpperCamelCase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id) return loss, logits def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : List[str] = inputs.pop("labels") __lowerCAmelCase : List[Any] = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return loss def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: nn.Module , _SCREAMING_SNAKE_CASE: Dict[str, Union[torch.Tensor, Any]] , _SCREAMING_SNAKE_CASE: bool , _SCREAMING_SNAKE_CASE: Optional[List[str]] = None , ) -> Dict: """simple docstring""" __lowerCAmelCase : Union[str, Any] = self._prepare_inputs(UpperCamelCase__) __lowerCAmelCase : Dict = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: __lowerCAmelCase : Optional[Any] = self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **UpperCamelCase__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: __lowerCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs["max_length"]) __lowerCAmelCase : Tuple = inputs.pop("labels") with torch.no_grad(): # compute loss on predict data __lowerCAmelCase : int = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase : Optional[Any] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) __lowerCAmelCase : int = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: __lowerCAmelCase : Dict = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs["max_length"]) return (loss, logits, labels) def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> List[str]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" F""" padded to `max_length`={max_length}""") __lowerCAmelCase : List[Any] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device) __lowerCAmelCase : Tuple = tensor return padded_tensor
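The trainer above imports label_smoothed_nll_loss dynamically from a local utils module that is not shown in this snippet. For context, a common implementation of that helper (along the lines of the one in the transformers seq2seq examples, originally from fairseq) looks roughly like this:

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """Label-smoothed negative log-likelihood over log-probabilities."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)  # zero out padding positions
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss

The signature matches the call site above: (log-probabilities, labels, smoothing epsilon, ignore_index=pad_token_id).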
712
"""simple docstring""" import re from filelock import FileLock try: import nltk __snake_case : Any = True except (ImportError, ModuleNotFoundError): __snake_case : Optional[int] = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def _lowercase ( __snake_case ) -> str: re.sub("<n>" ,"" ,__snake_case ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__snake_case ) )
615
0
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers lowercase_ = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
695
"""simple docstring""" def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> Any: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __a = len(set_a.intersection(lowerCAmelCase__ ) ) if alternative_union: __a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ ) else: __a = len(set_a.union(lowerCAmelCase__ ) ) return intersection / union if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(lowerCAmelCase__ , (list, tuple) ): __a = [element for element in set_a if element in set_b] if alternative_union: __a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ ) return len(lowerCAmelCase__ ) / union else: __a = set_a + [element for element in set_b if element not in set_a] return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) return None if __name__ == "__main__": lowercase_ = {"a", "b", "c", "d", "e"} lowercase_ = {"c", "d", "e", "f", "h", "i"} print(jaccard_similarity(set_a, set_b))
695
1
import argparse import os import re A__ : Dict = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict A__ : List[Any] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings A__ : Union[str, Any] = re.compile(r'\s*\(\s*"(\S[^"]+)"') def a ( lowerCamelCase_ , lowerCamelCase_ = False ): '''simple docstring''' with open(lowerCamelCase_ , '''r''' , encoding='''utf-8''' ) as f: lowercase__ = f.read() lowercase__ = content.split('''\n''' ) lowercase__ = [] lowercase__ = 0 while line_idx < len(lowerCamelCase_ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowercase__ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowercase__ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowercase__ = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowercase__ = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : _re_identifier.search(lowerCamelCase_ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(lowerCamelCase_ ) ) elif "\n".join(lowerCamelCase_ ) != content: return True def a ( lowerCamelCase_ = False ): '''simple docstring''' lowercase__ = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for f in os.listdir(lowerCamelCase_ ) if f.endswith('''.py''' )] lowercase__ = [sort_auto_mapping(lowerCamelCase_ , overwrite=lowerCamelCase_ ) for fname in fnames] if not overwrite and any(lowerCamelCase_ ): lowercase__ = [f for f, d in zip(lowerCamelCase_ , lowerCamelCase_ ) if d] raise ValueError( F"""The following files have auto mappings that need sorting: {', '.join(lowerCamelCase_ )}. Run `make style` to fix""" ''' this.''' ) if __name__ == "__main__": A__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') A__ : Dict = parser.parse_args() sort_all_auto_mappings(not args.check_only)
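To make the two patterns concrete, a small sketch of what they match on hypothetical mapping lines (the names are illustrative only):

import re

_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

# The intro pattern recognizes the start of a mapping definition...
assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
# ...and the identifier pattern extracts the sort key from each entry.
assert _re_identifier.search('        ("albert", "AlbertModel"),').groups()[0] == "albert"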
700
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer A__ : Dict = logging.get_logger(__name__) A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A__ : Optional[int] = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 
'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } A__ : List[str] = { 'bert-base-uncased': 5_12, 'bert-large-uncased': 5_12, 'bert-base-cased': 5_12, 'bert-large-cased': 5_12, 'bert-base-multilingual-uncased': 5_12, 'bert-base-multilingual-cased': 5_12, 'bert-base-chinese': 5_12, 'bert-base-german-cased': 5_12, 'bert-large-uncased-whole-word-masking': 5_12, 'bert-large-cased-whole-word-masking': 5_12, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_12, 'bert-base-cased-finetuned-mrpc': 5_12, 'bert-base-german-dbmdz-cased': 5_12, 'bert-base-german-dbmdz-uncased': 5_12, 'TurkuNLP/bert-base-finnish-cased-v1': 5_12, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_12, 'wietsedv/bert-base-dutch-cased': 5_12, } A__ : Optional[int] = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class _UpperCAmelCase ( A__ ): """simple docstring""" lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = BertTokenizer def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ): 
'''simple docstring''' super().__init__( lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, ) lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars ): lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) ) lowercase__ = do_lower_case lowercase__ = strip_accents lowercase__ = tokenize_chinese_chars lowercase__ = normalizer_class(**lowerCamelCase ) lowercase__ = do_lower_case def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ): '''simple docstring''' lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ): '''simple docstring''' lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase ) return tuple(lowerCamelCase )
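For reference, the token layout the two helpers above produce, checked with hypothetical ids (101 for [CLS], 102 for [SEP]):

# Hypothetical ids: 101 = [CLS], 102 = [SEP]; sequence A = [7, 8], B = [9].
cls, sep = [101], [102]
token_ids_a, token_ids_b = [7, 8], [9]
inputs = cls + token_ids_a + sep + token_ids_b + sep
type_ids = [0] * len(cls + token_ids_a + sep) + [1] * len(token_ids_b + sep)
assert inputs == [101, 7, 8, 102, 9, 102]  # [CLS] A [SEP] B [SEP]
assert type_ids == [0, 0, 0, 0, 1, 1]      # segment ids for A vs. B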
671
0
import flax.linen as nn import jax import jax.numpy as jnp class A__ ( nn.Module ): _UpperCAmelCase :int _UpperCAmelCase :jnp.dtype = jnp.floataa def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , A_ ): '''simple docstring''' UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = hidden_states.shape UpperCamelCase : Union[str, Any] = jax.image.resize( A_ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) UpperCamelCase : Optional[Any] = self.conv(A_ ) return hidden_states class A__ ( nn.Module ): _UpperCAmelCase :int _UpperCAmelCase :jnp.dtype = jnp.floataa def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.conv(A_ ) return hidden_states class A__ ( nn.Module ): _UpperCAmelCase :int _UpperCAmelCase :int = None _UpperCAmelCase :float = 0.0 _UpperCAmelCase :bool = None _UpperCAmelCase :jnp.dtype = jnp.floataa def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.in_channels if self.out_channels is None else self.out_channels UpperCamelCase : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) UpperCamelCase : int = nn.Conv( A_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) UpperCamelCase : int = nn.Dense(A_ , dtype=self.dtype ) UpperCamelCase : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) UpperCamelCase : Optional[Any] = nn.Dropout(self.dropout_prob ) UpperCamelCase : Tuple = nn.Conv( A_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) UpperCamelCase : str = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut UpperCamelCase : List[Any] = None if use_nin_shortcut: UpperCamelCase : Union[str, Any] = nn.Conv( A_ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self , A_ , A_ , A_=True ): '''simple docstring''' UpperCamelCase : Optional[int] = hidden_states UpperCamelCase : Any = self.norma(A_ ) UpperCamelCase : Tuple = nn.swish(A_ ) UpperCamelCase : List[str] = self.conva(A_ ) UpperCamelCase : List[str] = self.time_emb_proj(nn.swish(A_ ) ) UpperCamelCase : Union[str, Any] = jnp.expand_dims(jnp.expand_dims(A_ , 1 ) , 1 ) UpperCamelCase : Dict = hidden_states + temb UpperCamelCase : Optional[int] = self.norma(A_ ) UpperCamelCase : Tuple = nn.swish(A_ ) UpperCamelCase : Optional[Any] = self.dropout(A_ , A_ ) UpperCamelCase : Optional[Any] = self.conva(A_ ) if self.conv_shortcut is not None: UpperCamelCase : Union[str, Any] = self.conv_shortcut(A_ ) return hidden_states + residual
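The core of the upsampling block above is jax.image.resize with nearest-neighbor interpolation; a standalone sketch of just that step, in the NHWC layout the module uses:

import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 4))  # (batch, height, width, channels)
y = jax.image.resize(x, shape=(1, 16, 16, 4), method="nearest")
assert y.shape == (1, 16, 16, 4)  # spatial dims doubled, channels unchanged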
629
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' UpperCamelCase : Dict = parent UpperCamelCase : int = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : int = use_input_mask UpperCamelCase : Optional[Any] = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : Union[str, Any] = hidden_size UpperCamelCase : Any = embedding_size UpperCamelCase : List[Any] = num_hidden_layers UpperCamelCase : Optional[int] = num_attention_heads UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Optional[Any] = attention_probs_dropout_prob UpperCamelCase : List[str] = max_position_embeddings UpperCamelCase : List[Any] = type_vocab_size UpperCamelCase : Any = type_sequence_label_size UpperCamelCase : Optional[int] = initializer_range UpperCamelCase : str = num_labels UpperCamelCase : List[str] = num_choices UpperCamelCase : Tuple = scope def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase : List[str] = None if self.use_input_mask: UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase : Tuple = None if self.use_token_type_ids: UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Dict = None UpperCamelCase : Union[str, Any] = None UpperCamelCase : Dict = None if self.use_labels: UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase( self ): '''simple docstring''' return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = MegatronBertModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Optional[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ ) UpperCamelCase : Dict = model(A_ , token_type_ids=A_ ) UpperCamelCase : Union[str, Any] = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = MegatronBertForMaskedLM(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Optional[int] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = MegatronBertForCausalLM(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = MegatronBertForNextSentencePrediction(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = MegatronBertForPreTraining(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model( A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = MegatronBertForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Union[str, Any] = model( A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = self.num_labels UpperCamelCase : Optional[int] = MegatronBertForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.num_labels UpperCamelCase : List[str] = MegatronBertForTokenClassification(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = self.num_choices UpperCamelCase : int = MegatronBertForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase : Tuple = model( A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Any = config_and_inputs UpperCamelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Tuple = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Optional[Any] = True # test_resize_embeddings = False _UpperCAmelCase :Optional[Any] = False def __UpperCamelCase( self , A_ , A_ , A_=False ): '''simple docstring''' UpperCamelCase : Any = super()._prepare_for_class(A_ , A_ , return_labels=A_ ) if return_labels: if model_class in get_values(A_ ): UpperCamelCase : str = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ ) UpperCamelCase : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A_ ) return inputs_dict def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = MegatronBertModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A_ ) def 
__UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*A_ ) def A_ ( _lowerCAmelCase ) -> int: return torch.tensor( _lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase , ) __lowerCamelCase : Optional[int] = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): @slow @unittest.skip("Model is not available." ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = "nvidia/megatron-bert-uncased-345m" if "MYDIR" in os.environ: UpperCamelCase : Union[str, Any] = os.path.join(os.environ["MYDIR"] , A_ ) UpperCamelCase : List[Any] = MegatronBertModel.from_pretrained(A_ ) model.to(A_ ) model.half() UpperCamelCase : Optional[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): UpperCamelCase : Tuple = model(A_ )[0] UpperCamelCase : Dict = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , A_ ) UpperCamelCase : Union[str, Any] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28] for ii in range(3 ): for jj in range(3 ): UpperCamelCase : List[str] = output[0, ii, jj] UpperCamelCase : List[Any] = expected[3 * ii + jj] UpperCamelCase : Optional[Any] = "ii={} jj={} a={} b={}".format(A_ , A_ , A_ , A_ ) self.assertTrue(math.isclose(A_ , A_ , rel_tol=A_ , abs_tol=A_ ) , msg=A_ )
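The elementwise comparison in the integration test above reduces to math.isclose with the same value used for both the relative and the absolute tolerance; a standalone sketch with hypothetical numbers:

import math

TOLERANCE = 1e-4
a, b = 0.60401, 0.6040  # hypothetical model output vs. expected value
assert math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE)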
629
1
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE = None if token is not None: _SCREAMING_SNAKE_CASE = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} _SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" _SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _SCREAMING_SNAKE_CASE = {} try: job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) _SCREAMING_SNAKE_CASE = math.ceil((result["""total_count"""] - 1_00) / 1_00 ) for i in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=SCREAMING_SNAKE_CASE_ ).json() job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return job_links except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = None if token is not None: _SCREAMING_SNAKE_CASE = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} _SCREAMING_SNAKE_CASE = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" _SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _SCREAMING_SNAKE_CASE = {} try: artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) _SCREAMING_SNAKE_CASE = math.ceil((result["""total_count"""] - 1_00) / 1_00 ) for i in range(SCREAMING_SNAKE_CASE_ ): _SCREAMING_SNAKE_CASE = requests.get(url + F"&page={i + 2}" , headers=SCREAMING_SNAKE_CASE_ ).json() artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) return artifacts except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE = None if token is not None: _SCREAMING_SNAKE_CASE = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} _SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = result.headers["""Location"""] _SCREAMING_SNAKE_CASE = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , F"{artifact_name}.zip" ) with open(SCREAMING_SNAKE_CASE_ , """wb""" ) as fp: fp.write(response.content ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = None with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(SCREAMING_SNAKE_CASE_ ) 
as f: for line in f: _SCREAMING_SNAKE_CASE = line.decode("""UTF-8""" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs _SCREAMING_SNAKE_CASE = line[: line.index(""": """ )] _SCREAMING_SNAKE_CASE = line[line.index(""": """ ) + len(""": """ ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("""FAILED """ ): # `test` is the test method that failed _SCREAMING_SNAKE_CASE = line[len("""FAILED """ ) :] failed_tests.append(SCREAMING_SNAKE_CASE_ ) elif filename == "job_name.txt": _SCREAMING_SNAKE_CASE = line if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F"`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` " F"and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" """ problem.""" ) _SCREAMING_SNAKE_CASE = None if job_name and job_links: _SCREAMING_SNAKE_CASE = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # A list with elements of the form (line of error, error, failed test) _SCREAMING_SNAKE_CASE = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] return result def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(""".zip""" )] for p in paths: errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) ) return errors def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = Counter() counter.update([x[1] for x in logs] ) _SCREAMING_SNAKE_CASE = counter.most_common() _SCREAMING_SNAKE_CASE = {} for error, count in counts: if error_filter is None or error not in error_filter: _SCREAMING_SNAKE_CASE = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]} _SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE = test.split("""::""" )[0] if test.startswith("""tests/models/""" ): _SCREAMING_SNAKE_CASE = test.split("""/""" )[2] else: _SCREAMING_SNAKE_CASE = None return test def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = [(x[0], x[1], get_model(x[2] )) for x in logs] _SCREAMING_SNAKE_CASE = [x for x in logs if x[2] is not None] _SCREAMING_SNAKE_CASE = {x[2] for x in logs} _SCREAMING_SNAKE_CASE = {} for test in tests: _SCREAMING_SNAKE_CASE = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) _SCREAMING_SNAKE_CASE = counter.most_common() _SCREAMING_SNAKE_CASE = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} _SCREAMING_SNAKE_CASE = sum(error_counts.values() ) if n_errors > 0: _SCREAMING_SNAKE_CASE = {"""count""": n_errors, """errors""": error_counts} _SCREAMING_SNAKE_CASE = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def 
lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = """| no. | error | status |""" _SCREAMING_SNAKE_CASE = """|-:|:-|:-|""" _SCREAMING_SNAKE_CASE = [header, sep] for error in reduced_by_error: _SCREAMING_SNAKE_CASE = reduced_by_error[error]["""count"""] _SCREAMING_SNAKE_CASE = F"| {count} | {error[:1_00]} | |" lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE = """| model | no. of errors | major error | count |""" _SCREAMING_SNAKE_CASE = """|-:|-:|-:|-:|""" _SCREAMING_SNAKE_CASE = [header, sep] for model in reduced_by_model: _SCREAMING_SNAKE_CASE = reduced_by_model[model]["""count"""] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = list(reduced_by_model[model]["""errors"""].items() )[0] _SCREAMING_SNAKE_CASE = F"| {model} | {count} | {error[:60]} | {_count} |" lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCamelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") UpperCamelCase__ : str = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) UpperCamelCase__ : List[Any] = get_job_links(args.workflow_run_id, token=args.token) UpperCamelCase__ : List[str] = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: UpperCamelCase__ : Dict = k.find(" / ") UpperCamelCase__ : Optional[Any] = k[index + len(" / ") :] UpperCamelCase__ : List[str] = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) UpperCamelCase__ : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) UpperCamelCase__ : List[str] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error UpperCamelCase__ : str = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors UpperCamelCase__ : List[str] = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) UpperCamelCase__ : Optional[Any] = reduce_by_error(errors) UpperCamelCase__ : Optional[int] = reduce_by_model(errors) UpperCamelCase__ : Optional[int] = make_github_table(reduced_by_error) UpperCamelCase__ : Optional[Any] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
715
'''simple docstring''' import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def test_offline_with_timeout ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(RequestWouldHangIndefinitelyError ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def test_offline_with_connection_error ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def test_offline_with_datasets_offline_mode_enabled ( ): """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(ConnectionError ): http_head("""https://huggingface.co""" )
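The OfflineSimulationMode helper itself is not shown here. Conceptually, its CONNECTION_FAILS mode can be approximated by patching requests so that every HTTP call raises; this is a sketch under that assumption, not datasets' actual implementation:

from unittest.mock import patch

import pytest
import requests

def test_connection_fails_simulation():
    # Force every HTTP request to fail as if the network were unreachable.
    with patch("requests.Session.request", side_effect=requests.ConnectionError("offline")):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")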
0
0
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self , a__ , a__=7 , a__=3 , a__=1_8 , a__=3_0 , a__=4_0_0 , a__=True , a__=None , a__=True , ) -> Optional[int]: '''simple docstring''' A_ = size if size is not None else {'''height''': 1_8, '''width''': 1_8} A_ = parent A_ = batch_size A_ = num_channels A_ = image_size A_ = min_resolution A_ = max_resolution A_ = do_resize A_ = size A_ = apply_ocr def lowerCAmelCase_ ( self ) -> Any: '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __lowercase ( A , unittest.TestCase ): __magic_name__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCAmelCase_ ( self ) -> str: '''simple docstring''' A_ = LayoutLMvaImageProcessingTester(self ) @property def lowerCAmelCase_ ( self ) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self ) -> List[str]: '''simple docstring''' A_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a__ , '''do_resize''' ) ) self.assertTrue(hasattr(a__ , '''size''' ) ) self.assertTrue(hasattr(a__ , '''apply_ocr''' ) ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: '''simple docstring''' A_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} ) A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' pass def lowerCAmelCase_ ( self ) -> Optional[int]: '''simple docstring''' # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ ) for image in image_inputs: self.assertIsInstance(a__ , Image.Image ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , a__ ) self.assertIsInstance(encoding.boxes , a__ ) # Test batched A_ = image_processing(a__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: '''simple docstring''' # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , np.ndarray ) # Test not batched input A_ = 
image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A_ = image_processing(a__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowerCAmelCase_ ( self ) -> Tuple: '''simple docstring''' # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , torch.Tensor ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A_ = image_processing(a__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: '''simple docstring''' # with apply_OCR = True A_ = LayoutLMvaImageProcessor() from datasets import load_dataset A_ = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) A_ = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) A_ = image_processing(a__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 A_ = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', 
'''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 A_ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 
4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a__ ) self.assertListEqual(encoding.boxes , a__ ) # with apply_OCR = False A_ = LayoutLMvaImageProcessor(apply_ocr=a__ ) A_ = image_processing(a__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
141
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class __lowercase ( A , A , unittest.TestCase ): __magic_name__ : List[str] = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __magic_name__ : Optional[Any] = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __magic_name__ : Optional[int] = False __magic_name__ : List[Any] = False def lowerCAmelCase_ ( self , a__ , a__ , a__=False ) -> Union[str, Any]: '''simple docstring''' A_ = super()._prepare_for_class(a__ , a__ , return_labels=a__ ) if return_labels: if model_class in get_values(a__ ): A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class __lowercase ( A ): def __init__( self , a__ , a__=1_3 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=9_9 , a__=3_2 , a__=3_2 , a__=2 , a__=4 , a__=3_7 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_1_2 , a__=1_6 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ) -> List[str]: '''simple docstring''' A_ = parent A_ = batch_size A_ = seq_length A_ = is_training A_ = use_input_mask A_ = use_token_type_ids A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = type_sequence_label_size A_ = initializer_range A_ = num_labels A_ = num_choices A_ = scope A_ = embedding_size def lowerCAmelCase_ ( self ) -> Tuple: '''simple docstring''' A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Dict: '''simple docstring''' A_ = TFMobileBertModel(config=a__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = model(a__ ) A_ = [input_ids, input_mask] A_ = model(a__ ) A_ = model(a__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[int]: '''simple docstring''' A_ = TFMobileBertForMaskedLM(config=a__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> int: '''simple docstring''' A_ = TFMobileBertForNextSentencePrediction(config=a__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[str]: '''simple docstring''' A_ = TFMobileBertForPreTraining(config=a__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = model(a__ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[Any]: '''simple docstring''' A_ = self.num_labels A_ = TFMobileBertForSequenceClassification(config=a__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Any: '''simple docstring''' A_ = self.num_choices A_ = TFMobileBertForMultipleChoice(config=a__ ) A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) ) A_ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A_ = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[Any]: '''simple docstring''' A_ = self.num_labels A_ = TFMobileBertForTokenClassification(config=a__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def 
lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Any: '''simple docstring''' A_ = TFMobileBertForQuestionAnswering(config=a__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = model(a__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self ) -> Any: '''simple docstring''' A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def lowerCAmelCase_ ( self ) -> str: '''simple docstring''' A_ = TFMobileBertModelTest.TFMobileBertModelTester(self ) A_ = ConfigTester(self , config_class=a__ , hidden_size=3_7 ) def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase_ ( self ) -> int: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*a__ ) def lowerCAmelCase_ ( self ) -> Tuple: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*a__ ) def lowerCAmelCase_ ( self ) -> List[str]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*a__ ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*a__ ) def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*a__ ) def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*a__ ) def lowerCAmelCase_ ( self ) -> List[Any]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*a__ ) def lowerCAmelCase_ ( self ) -> List[Any]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*a__ ) @slow def lowerCAmelCase_ ( self ) -> int: '''simple docstring''' # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: A_ = TFMobileBertModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_tf class __lowercase ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) A_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ = model(a__ )[0] A_ = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , a__ ) A_ = tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1E-4 )
141
1
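The row above tests the LayoutLMv3 image processor (the snippet's class name is mangled). Below is a short usage sketch of the API under test; in released transformers the class is LayoutLMv3ImageProcessor, and the image path is a placeholder.

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # set apply_ocr=True to also run Tesseract OCR
image = Image.open("document.png").convert("RGB")      # placeholder path
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)                     # torch.Size([1, 3, 224, 224])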
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
262
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets snake_case_ = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' snake_case_ = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' snake_case_ = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def A__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: def remove_articles(SCREAMING_SNAKE_CASE_ ): lowerCamelCase : Tuple =re.compile(R'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(SCREAMING_SNAKE_CASE_ , ''' ''' , SCREAMING_SNAKE_CASE_ ) def white_space_fix(SCREAMING_SNAKE_CASE_ ): return " ".join(text.split() ) def remove_punc(SCREAMING_SNAKE_CASE_ ): lowerCamelCase : int =set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(SCREAMING_SNAKE_CASE_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE_ ) ) ) ) def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: return int(normalize_answer(SCREAMING_SNAKE_CASE_ ) == normalize_answer(SCREAMING_SNAKE_CASE_ ) ) def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: lowerCamelCase : Union[str, Any] =[any(compute_exact(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] return (sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ )) * 1_0_0 def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: lowerCamelCase : Any =[rgram for rgrams in rgramslist for rgram in rgrams] lowerCamelCase : int =Counter(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Dict =Counter(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Any =Counter() for sgram, scount in 
sgramcounter.items(): lowerCamelCase : Tuple =scount * numref lowerCamelCase : Optional[int] =Counter(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Tuple =Counter() for cgram, ccount in cgramcounter.items(): lowerCamelCase : Tuple =ccount * numref # KEEP lowerCamelCase : str =sgramcounter_rep & cgramcounter_rep lowerCamelCase : Union[str, Any] =keepgramcounter_rep & rgramcounter lowerCamelCase : Optional[Any] =sgramcounter_rep & rgramcounter lowerCamelCase : Optional[Any] =0 lowerCamelCase : List[Any] =0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. lowerCamelCase : Tuple =1 lowerCamelCase : int =1 if len(SCREAMING_SNAKE_CASE_ ) > 0: lowerCamelCase : Tuple =keeptmpscorea / len(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) lowerCamelCase : Any =keeptmpscorea / sum(keepgramcounterall_rep.values() ) lowerCamelCase : Optional[Any] =0 if keepscore_precision > 0 or keepscore_recall > 0: lowerCamelCase : Optional[int] =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION lowerCamelCase : int =sgramcounter_rep - cgramcounter_rep lowerCamelCase : Dict =delgramcounter_rep - rgramcounter lowerCamelCase : Dict =sgramcounter_rep - rgramcounter lowerCamelCase : Optional[int] =0 lowerCamelCase : List[Any] =0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. lowerCamelCase : str =1 if len(SCREAMING_SNAKE_CASE_ ) > 0: lowerCamelCase : Optional[int] =deltmpscorea / len(SCREAMING_SNAKE_CASE_ ) # ADDITION lowerCamelCase : List[Any] =set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : int =set(SCREAMING_SNAKE_CASE_ ) & set(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Optional[Any] =set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : int =0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
lowerCamelCase : int =1 lowerCamelCase : List[Any] =1 if len(SCREAMING_SNAKE_CASE_ ) > 0: lowerCamelCase : str =addtmpscore / len(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: lowerCamelCase : List[str] =addtmpscore / len(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Optional[Any] =0 if addscore_precision > 0 or addscore_recall > 0: lowerCamelCase : Optional[Any] =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: lowerCamelCase : Optional[int] =len(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Dict =ssent.split(''' ''' ) lowerCamelCase : Any =csent.split(''' ''' ) lowerCamelCase : str =[] lowerCamelCase : Optional[Any] =[] lowerCamelCase : List[Any] =[] lowerCamelCase : List[str] =[] lowerCamelCase : Tuple =[] lowerCamelCase : Optional[Any] =[] lowerCamelCase : int =[] lowerCamelCase : List[str] =[] lowerCamelCase : Dict =[] lowerCamelCase : Any =[] for rsent in rsents: lowerCamelCase : Any =rsent.split(''' ''' ) lowerCamelCase : int =[] lowerCamelCase : Optional[Any] =[] lowerCamelCase : List[Any] =[] ragramslist.append(SCREAMING_SNAKE_CASE_ ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ): if i < len(SCREAMING_SNAKE_CASE_ ) - 1: lowerCamelCase : Optional[int] =ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(SCREAMING_SNAKE_CASE_ ) if i < len(SCREAMING_SNAKE_CASE_ ) - 2: lowerCamelCase : str =ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(SCREAMING_SNAKE_CASE_ ) if i < len(SCREAMING_SNAKE_CASE_ ) - 3: lowerCamelCase : int =ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(SCREAMING_SNAKE_CASE_ ) ragramslist.append(SCREAMING_SNAKE_CASE_ ) ragramslist.append(SCREAMING_SNAKE_CASE_ ) ragramslist.append(SCREAMING_SNAKE_CASE_ ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ): if i < len(SCREAMING_SNAKE_CASE_ ) - 1: lowerCamelCase : Optional[int] =sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(SCREAMING_SNAKE_CASE_ ) if i < len(SCREAMING_SNAKE_CASE_ ) - 2: lowerCamelCase : List[Any] =sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(SCREAMING_SNAKE_CASE_ ) if i < len(SCREAMING_SNAKE_CASE_ ) - 3: lowerCamelCase : Optional[int] =sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(SCREAMING_SNAKE_CASE_ ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ): if i < len(SCREAMING_SNAKE_CASE_ ) - 1: lowerCamelCase : Optional[int] =cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(SCREAMING_SNAKE_CASE_ ) if i < len(SCREAMING_SNAKE_CASE_ ) - 2: lowerCamelCase : List[str] =cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(SCREAMING_SNAKE_CASE_ ) if i < len(SCREAMING_SNAKE_CASE_ ) - 3: lowerCamelCase : str =cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(SCREAMING_SNAKE_CASE_ ) ((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : Any =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : Optional[Any] =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : List[Any] =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : List[str] =SARIngram(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase : List[Any] =sum([keepascore, keepascore, keepascore, keepascore] ) / 4 lowerCamelCase : List[str] =sum([delascore, delascore, delascore, delascore] ) / 4 lowerCamelCase : int =sum([addascore, addascore, addascore, addascore] ) / 4 lowerCamelCase : Any =(avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = "13a" , SCREAMING_SNAKE_CASE_ = True ) -> Any: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: lowerCamelCase : Union[str, Any] =sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: lowerCamelCase : List[Any] =sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE_ )()(SCREAMING_SNAKE_CASE_ ) else: lowerCamelCase : Any =sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE_ ) elif tokenizer == "moses": lowerCamelCase : int =sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE_ , return_str=SCREAMING_SNAKE_CASE_ , escape=SCREAMING_SNAKE_CASE_ ) elif tokenizer == "penn": lowerCamelCase : Any =sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE_ , return_str=SCREAMING_SNAKE_CASE_ ) else: lowerCamelCase : Optional[int] =sentence if not return_str: lowerCamelCase : Union[str, Any] =normalized_sent.split() return normalized_sent def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if not (len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )): raise ValueError('''Sources length must match predictions and references lengths.''' ) lowerCamelCase : Dict =0 for src, pred, refs in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE_ ) , normalize(SCREAMING_SNAKE_CASE_ ) , [normalize(SCREAMING_SNAKE_CASE_ ) for sent in refs] ) lowerCamelCase : str =sari_score / len(SCREAMING_SNAKE_CASE_ ) return 1_0_0 * sari_score def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="exp" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ) -> Dict: lowerCamelCase : Optional[int] =len(references[0] ) if any(len(SCREAMING_SNAKE_CASE_ ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) lowerCamelCase : Optional[int] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE_ )] lowerCamelCase : Union[str, Any] =sacrebleu.corpus_bleu( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , smooth_method=SCREAMING_SNAKE_CASE_ , smooth_value=SCREAMING_SNAKE_CASE_ , force=SCREAMING_SNAKE_CASE_ , lowercase=SCREAMING_SNAKE_CASE_ , use_effective_order=SCREAMING_SNAKE_CASE_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , 
_KWARGS_DESCRIPTION) class snake_case_ ( datasets.Metric): def __lowercase ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowercase ( self , __lowercase , __lowercase , __lowercase ) -> Tuple: lowerCamelCase : str ={} result.update({'''sari''': compute_sari(sources=__lowercase , predictions=__lowercase , references=__lowercase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=__lowercase , references=__lowercase )} ) result.update({'''exact''': compute_em(predictions=__lowercase , references=__lowercase )} ) return result
262
1
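The code field of the row above is a T5 TensorFlow-to-PyTorch checkpoint conversion script. A hypothetical command-line invocation (the script name and all paths are placeholders):

python convert_t5_original_tf_checkpoint_to_pytorch.py \
    --tf_checkpoint_path ./t5_tf_checkpoint \
    --config_file ./t5_config.json \
    --pytorch_dump_path ./t5_pytorch_model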
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids must be a plain list of integers, and the constraint may
        # only be initialized from (nested) lists of integers, not tensors.
        constraint_token_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(constraint_token_ids)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint must not be a complete subset of another, otherwise
        # fulfilling the shorter one would spuriously complete the longer one.
        constraint_token_ids = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(constraint_token_ids)  # fails here

    def test_example_progression(self):
        constraint_token_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraint_token_ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        self.assertTrue(stepped is True and completed is True and reset is False)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        constraint_token_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraint_token_ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
74
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
74
1
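A compact sketch of the DisjunctiveConstraint behaviour verified by the test in the row above, using only the methods the test exercises (update, completed, current_seq):

from transformers.generation import DisjunctiveConstraint

# Either [1, 2, 3] or [1, 2, 4] satisfies the disjunctive constraint.
dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
assert dc.completed and dc.current_seq == [1, 2, 4]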
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm, which finds the longest palindromic substring in linear time.

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # we will store the start and end of the previous furthest-ending palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update l and r to cover it
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
476
UpperCAmelCase_ = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
476
1
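Quick usage check for the Manacher implementation in the row above; the expected values match its doctests:

assert palindromic_string("ababa") == "ababa"    # the whole string is a palindrome
assert palindromic_string("abbbaba") == "abbba"  # longest palindrome is the 5-character prefix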
"""simple docstring""" def __lowerCAmelCase ( __UpperCamelCase : int ): '''simple docstring''' assert isinstance(__UpperCamelCase , __UpperCamelCase ), F'The input value of [n={number}] is not an integer' if number == 1: return 2 elif number < 1: snake_case_ : Union[str, Any] = F'The input value of [n={number}] has to be > 0' raise ValueError(__UpperCamelCase ) else: snake_case_ : Tuple = sylvester(number - 1 ) snake_case_ : Any = num - 1 snake_case_ : Optional[Any] = num return lower * upper + 1 if __name__ == "__main__": print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
58
"""simple docstring""" def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] ): '''simple docstring''' if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : list[int] , __UpperCamelCase : int ): '''simple docstring''' if curr_ind == len(__UpperCamelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__UpperCamelCase ) ): if valid_connection(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # Insert current vertex into path as next transition snake_case_ : List[str] = next_ver # Validate created path if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , curr_ind + 1 ): return True # Backtrack snake_case_ : Tuple = -1 return False def __lowerCAmelCase ( __UpperCamelCase : list[list[int]] , __UpperCamelCase : int = 0 ): '''simple docstring''' snake_case_ : Tuple = [-1] * (len(__UpperCamelCase ) + 1) # initialize start and end of path with starting index snake_case_ : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__UpperCamelCase , __UpperCamelCase , 1 ) else []
58
1
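Usage sketch for the two functions in the row above; the adjacency matrix is a small example chosen for illustration.

# Sylvester's sequence: 2, 3, 7, 43, ...
assert sylvester(1) == 2
assert sylvester(2) == 3   # 1 * 2 + 1
assert sylvester(3) == 7   # 2 * 3 + 1
assert sylvester(4) == 43  # 6 * 7 + 1

# Square 0-1-2-3 with an extra 1-3 diagonal; a Hamiltonian cycle exists.
graph = [
    [0, 1, 0, 1],
    [1, 0, 1, 1],
    [0, 1, 0, 1],
    [1, 1, 1, 0],
]
assert hamilton_cycle(graph) == [0, 1, 2, 3, 0]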
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class UpperCamelCase (__snake_case , __snake_case , __snake_case , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = StableUnCLIPImgaImgPipeline _SCREAMING_SNAKE_CASE : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _SCREAMING_SNAKE_CASE : int = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _SCREAMING_SNAKE_CASE : Tuple = frozenset([] ) def __snake_case ( self :Union[str, Any] ) ->Tuple: lowercase : Dict = 32 lowercase : Tuple = embedder_hidden_size # image encoding components lowercase : Tuple = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) lowercase : Union[str, Any] = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__magic_name__ , projection_dim=__magic_name__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) lowercase : Tuple = StableUnCLIPImageNormalizer(embedding_dim=__magic_name__ ) lowercase : Optional[int] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) lowercase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) lowercase : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__magic_name__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) lowercase : Dict = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__magic_name__ , layers_per_block=1 , upcast_attention=__magic_name__ , use_linear_projection=__magic_name__ , ) torch.manual_seed(0 ) lowercase : Dict = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__magic_name__ , steps_offset=1 , ) torch.manual_seed(0 ) lowercase : Tuple = AutoencoderKL() lowercase : Union[str, Any] = { # image encoding components """feature_extractor""": feature_extractor, """image_encoder""": 
image_encoder.eval(), # image noising components """image_normalizer""": image_normalizer.eval(), """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder.eval(), """unet""": unet.eval(), """scheduler""": scheduler, """vae""": vae.eval(), } return components def __snake_case ( self :Optional[int] , __magic_name__ :Optional[Any] , __magic_name__ :List[Any]=0 , __magic_name__ :List[Any]=True ) ->List[str]: if str(__magic_name__ ).startswith("""mps""" ): lowercase : Dict = torch.manual_seed(__magic_name__ ) else: lowercase : List[str] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) lowercase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) if pil_image: lowercase : str = input_image * 0.5 + 0.5 lowercase : Optional[Any] = input_image.clamp(0 , 1 ) lowercase : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowercase : List[Any] = DiffusionPipeline.numpy_to_pil(__magic_name__ )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __snake_case ( self :Any ) ->int: lowercase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowercase : Union[str, Any] = self.get_dummy_components() lowercase : Union[str, Any] = StableUnCLIPImgaImgPipeline(**__magic_name__ ) lowercase : int = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) lowercase : List[Any] = self.get_dummy_inputs(__magic_name__ ) inputs.update({"""image_embeds""": None} ) lowercase : List[str] = sd_pipe(**__magic_name__ ).images lowercase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase : int = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __snake_case ( self :str ) ->str: lowercase : str = torch_device in ["""cpu""", """mps"""] self._test_attention_slicing_forward_pass(test_max_difference=__magic_name__ ) def __snake_case ( self :List[Any] ) ->Tuple: lowercase : int = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__magic_name__ ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __snake_case ( self :List[Any] ) ->Dict: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__magic_name__ ) @slow @require_torch_gpu class UpperCamelCase (unittest.TestCase ): def __snake_case ( self :Any ) ->str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self :Union[str, Any] ) ->str: lowercase : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) lowercase : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" ) lowercase : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) # stable unclip will oom when integration tests are run 
on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase : Optional[int] = pipe(__magic_name__ , """anime turle""" , generator=__magic_name__ , output_type="""np""" ) lowercase : int = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) def __snake_case ( self :Any ) ->List[str]: lowercase : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) lowercase : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" ) lowercase : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase : Dict = pipe(__magic_name__ , """anime turle""" , generator=__magic_name__ , output_type="""np""" ) lowercase : int = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) def __snake_case ( self :Union[str, Any] ) ->Tuple: lowercase : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase : Any = StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) lowercase : Dict = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowercase : Dict = pipe( __magic_name__ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , ) lowercase : Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
702
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _lowerCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( _A ) -> List[List[ImageInput]]: if isinstance(_A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_A , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_A ): return [[videos]] raise ValueError(F"""Could not make batched video from {videos}""" ) class UpperCamelCase (__snake_case ): _SCREAMING_SNAKE_CASE : Union[str, Any] = ["""pixel_values"""] def __init__( self :Union[str, Any] , __magic_name__ :bool = True , __magic_name__ :Dict[str, int] = None , __magic_name__ :PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ :bool = True , __magic_name__ :Dict[str, int] = None , __magic_name__ :bool = True , __magic_name__ :Union[int, float] = 1 / 255 , __magic_name__ :bool = True , __magic_name__ :bool = True , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , **__magic_name__ :List[str] , ) ->None: super().__init__(**__magic_name__ ) lowercase : str = size if size is not None else {"""shortest_edge""": 256} lowercase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) lowercase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowercase : Union[str, Any] = get_size_dict(__magic_name__ , param_name="""crop_size""" ) lowercase : Union[str, Any] = do_resize lowercase : Any = size lowercase : int = do_center_crop lowercase : Any = crop_size lowercase : Tuple = resample lowercase : str = do_rescale lowercase : Tuple = rescale_factor lowercase : Optional[Any] = offset lowercase : Any = do_normalize lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __snake_case ( self :Optional[int] , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Any , ) ->np.ndarray: lowercase : Any = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) if "shortest_edge" in size: lowercase : Union[str, Any] = get_resize_output_image_size(__magic_name__ , size["""shortest_edge"""] , default_to_square=__magic_name__ ) elif "height" in size and "width" in size: lowercase : List[str] = (size["""height"""], size["""width"""]) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""" ) return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def __snake_case ( self :int , __magic_name__ :np.ndarray , __magic_name__ :Dict[str, int] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Dict , ) ->np.ndarray: lowercase : Any = get_size_dict(__magic_name__ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" ) return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ ) def __snake_case ( self :Dict , __magic_name__ :np.ndarray , __magic_name__ :Union[int, float] , __magic_name__ :bool = True , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Union[str, Any] , ) ->Union[str, Any]: lowercase : Dict = image.astype(np.floataa ) if offset: lowercase : List[str] = image - (scale / 2) return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def __snake_case ( self :Dict , __magic_name__ :np.ndarray , __magic_name__ :Union[float, List[float]] , __magic_name__ :Union[float, List[float]] , __magic_name__ :Optional[Union[str, ChannelDimension]] = None , **__magic_name__ :Union[str, Any] , ) ->np.ndarray: return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def __snake_case ( self :Tuple , __magic_name__ :ImageInput , __magic_name__ :bool = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :PILImageResampling = None , __magic_name__ :bool = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :bool = None , __magic_name__ :float = None , __magic_name__ :bool = None , __magic_name__ :bool = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[ChannelDimension] = ChannelDimension.FIRST , ) ->np.ndarray: if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
lowercase : Union[str, Any] = to_numpy_array(__magic_name__ ) if do_resize: lowercase : int = self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) if do_center_crop: lowercase : Union[str, Any] = self.center_crop(__magic_name__ , size=__magic_name__ ) if do_rescale: lowercase : Dict = self.rescale(image=__magic_name__ , scale=__magic_name__ , offset=__magic_name__ ) if do_normalize: lowercase : Tuple = self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) lowercase : List[Any] = to_channel_dimension_format(__magic_name__ , __magic_name__ ) return image def __snake_case ( self :Optional[int] , __magic_name__ :ImageInput , __magic_name__ :bool = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :PILImageResampling = None , __magic_name__ :bool = None , __magic_name__ :Dict[str, int] = None , __magic_name__ :bool = None , __magic_name__ :float = None , __magic_name__ :bool = None , __magic_name__ :bool = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[float, List[float]]] = None , __magic_name__ :Optional[Union[str, TensorType]] = None , __magic_name__ :ChannelDimension = ChannelDimension.FIRST , **__magic_name__ :Any , ) ->PIL.Image.Image: lowercase : List[str] = do_resize if do_resize is not None else self.do_resize lowercase : Optional[int] = resample if resample is not None else self.resample lowercase : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : Optional[Any] = offset if offset is not None else self.offset lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowercase : Any = image_mean if image_mean is not None else self.image_mean lowercase : Optional[int] = image_std if image_std is not None else self.image_std lowercase : List[Any] = size if size is not None else self.size lowercase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) lowercase : str = crop_size if crop_size is not None else self.crop_size lowercase : List[Any] = get_size_dict(__magic_name__ , param_name="""crop_size""" ) if not valid_images(__magic_name__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) lowercase : int = make_batched(__magic_name__ ) lowercase : Dict = [ [ self._preprocess_image( image=__magic_name__ , do_resize=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , do_center_crop=__magic_name__ , crop_size=__magic_name__ , do_rescale=__magic_name__ , rescale_factor=__magic_name__ , offset=__magic_name__ , do_normalize=__magic_name__ , image_mean=__magic_name__ , image_std=__magic_name__ , data_format=__magic_name__ , ) for img in video ] for video in videos ] lowercase : List[str] = {"""pixel_values""": videos} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
348
0
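For orientation while reading these rows: the integration tests above follow the standard diffusers img2img unCLIP calling pattern. A minimal usage sketch of that pattern, not taken from the rows themselves (the checkpoint id and prompt are illustrative assumptions):

import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

# Memory-saving switches mirror the ones the tests enable before inference.
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16  # illustrative checkpoint id
)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
generator = torch.Generator(device="cpu").manual_seed(0)
# Same positional call shape as in the tests: (image, prompt, ...).
image = pipe(init_image, "anime turtle", generator=generator, output_type="np").images[0]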
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) lowerCAmelCase__ =logging.getLogger(__name__) def _a ( ) -> Tuple: __SCREAMING_SNAKE_CASE = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=lowerCamelCase__ , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=lowerCamelCase__ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=lowerCamelCase__ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=lowerCamelCase__ , default='''data/dump''' , help='''The dump file prefix.''' ) __SCREAMING_SNAKE_CASE = parser.parse_args() logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" ) if args.tokenizer_type == "bert": __SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` __SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": __SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map['''cls_token'''] # `<s>` __SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": __SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` __SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(f"""Loading text from {args.file_path}""" ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: __SCREAMING_SNAKE_CASE = fp.readlines() logger.info('''Start encoding''' ) logger.info(f"""{len(lowerCamelCase__ )} examples to process.""" ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1_00_00 __SCREAMING_SNAKE_CASE = time.time() for text in data: __SCREAMING_SNAKE_CASE = f"""{bos} {text.strip()} {sep}""" __SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) rslt.append(lowerCamelCase__ ) iter += 1 if iter % interval == 0: __SCREAMING_SNAKE_CASE = time.time() logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" ) __SCREAMING_SNAKE_CASE = time.time() logger.info('''Finished binarization''' ) logger.info(f"""{len(lowerCamelCase__ )} examples processed.""" ) __SCREAMING_SNAKE_CASE = f"""{args.dump_file}.{args.tokenizer_name}.pickle""" __SCREAMING_SNAKE_CASE = tokenizer.vocab_size if vocab_size < (1 << 16): __SCREAMING_SNAKE_CASE = [np.uintaa(lowerCamelCase__ ) for d in rslt] else: __SCREAMING_SNAKE_CASE = [np.intaa(lowerCamelCase__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(f"""Dump to {dp_file}""" ) with open(lowerCamelCase__ , '''wb''' ) as handle: pickle.dump(rslt_ , lowerCamelCase__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
482
"""simple docstring""" from itertools import permutations def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False lowerCAmelCase__ = [7, 11, 13, 17] for i, test in enumerate(lowerCamelCase__ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def _UpperCAmelCase ( lowerCamelCase__ = 10 ): """simple docstring""" return sum( int("""""".join(map(lowerCamelCase__ , lowerCamelCase__ ) ) ) for num in permutations(range(lowerCamelCase__ ) ) if is_substring_divisible(lowerCamelCase__ ) ) if __name__ == "__main__": print(F"{solution() = }")
644
0
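As a quick sanity check on the divisibility predicate in the row above, the known pandigital 1406357289 from Project Euler 43 can be walked through the same conditions (digit indexing from 0, matching the function; the tuple below is a worked example, not part of the row):

num = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)
assert num[3] % 2 == 0                       # d2d3d4 = 406 ends in an even digit
assert (num[2] + num[3] + num[4]) % 3 == 0   # d3d4d5 = 063 has digit sum 9
assert num[5] % 5 == 0                       # d4d5d6 = 635 ends in 5
for i, prime in enumerate((7, 11, 13, 17)):  # 357/7, 572/11, 728/13, 289/17
    assert (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % prime == 0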
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase : List[Any] = """src/diffusers""" lowercase : Dict = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase : Dict = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase : Optional[Any] = spec.loader.load_module() def A_ ( A__ , A__ ) -> List[Any]: return line.startswith(A__ ) or len(A__ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , A__ ) is not None def A_ ( A__ ) -> Tuple: a__ : Union[str, Any] = object_name.split('.' ) a__ : Optional[Any] = 0 # First let's find the module where our object lives. a__ : Union[str, Any] = parts[i] while i < len(A__ ) and not os.path.isfile(os.path.join(A__ , F'{module}.py' ) ): i += 1 if i < len(A__ ): a__ : Optional[Any] = os.path.join(A__ , parts[i] ) if i >= len(A__ ): raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' ) with open(os.path.join(A__ , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: a__ : Optional[int] = f.readlines() # Now let's find the class / func in the code! a__ : List[Any] = '' a__ : Tuple = 0 for name in parts[i + 1 :]: while ( line_index < len(A__ ) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(A__ ): raise ValueError(F' {object_name} does not match any function or class in {module}.' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). a__ : str = line_index while line_index < len(A__ ) and _should_continue(lines[line_index] , A__ ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 a__ : str = lines[start_index:line_index] return "".join(A__ ) lowercase : Dict = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase : Optional[Any] = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase : Union[str, Any] = re.compile(r"""<FILL\s+[^>]*>""") def A_ ( A__ ) -> int: a__ : Any = code.split('\n' ) a__ : Any = 0 while idx < len(A__ ) and len(lines[idx] ) == 0: idx += 1 if idx < len(A__ ): return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0] return "" def A_ ( A__ ) -> Tuple: a__ : List[Any] = len(get_indent(A__ ) ) > 0 if has_indent: a__ : Tuple = F'class Bla:\n{code}' a__ : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=A__ ) a__ : Dict = black.format_str(A__ , mode=A__ ) a__ , a__ : Dict = style_docstrings_in_code(A__ ) return result[len('class Bla:\n' ) :] if has_indent else result def A_ ( A__ , A__=False ) -> Dict: with open(A__ , 'r' , encoding='utf-8' , newline='\n' ) as f: a__ : Dict = f.readlines() a__ : List[Any] = [] a__ : List[str] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(A__ ): a__ : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. 
a__ , a__ , a__ : Any = search.groups() a__ : Tuple = find_code_in_diffusers(A__ ) a__ : List[str] = get_indent(A__ ) a__ : Optional[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 a__ : Union[str, Any] = theoretical_indent a__ : List[Any] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment. a__ : Dict = True while line_index < len(A__ ) and should_continue: line_index += 1 if line_index >= len(A__ ): break a__ : Dict = lines[line_index] a__ : Optional[int] = _should_continue(A__ , A__ ) and re.search(F'^{indent}# End copy' , A__ ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 a__ : int = lines[start_index:line_index] a__ : Optional[Any] = ''.join(A__ ) # Remove any nested `Copied from` comments to avoid circular copies a__ : Tuple = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(A__ ) is None] a__ : Optional[Any] = '\n'.join(A__ ) # Before comparing, use the `replace_pattern` on the original code. if len(A__ ) > 0: a__ : Optional[Any] = replace_pattern.replace('with' , '' ).split(',' ) a__ : Optional[int] = [_re_replace_pattern.search(A__ ) for p in patterns] for pattern in patterns: if pattern is None: continue a__ , a__ , a__ : List[str] = pattern.groups() a__ : int = re.sub(A__ , A__ , A__ ) if option.strip() == "all-casing": a__ : Any = re.sub(obja.lower() , obja.lower() , A__ ) a__ : str = re.sub(obja.upper() , obja.upper() , A__ ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line a__ : List[str] = blackify(lines[start_index - 1] + theoretical_code ) a__ : Optional[Any] = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: a__ : Dict = lines[:start_index] + [theoretical_code] + lines[line_index:] a__ : Optional[Any] = start_index + 1 if overwrite and len(A__ ) > 0: # Warn the user a file has been modified. print(F'Detected changes, rewriting {filename}.' ) with open(A__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(A__ ) return diffs def A_ ( A__ = False ) -> str: a__ : Tuple = glob.glob(os.path.join(A__ , '**/*.py' ) , recursive=A__ ) a__ : Any = [] for filename in all_files: a__ : Union[str, Any] = is_copy_consistent(A__ , A__ ) diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs] if not overwrite and len(A__ ) > 0: a__ : Optional[Any] = '\n'.join(A__ ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": lowercase : str = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase : Optional[int] = parser.parse_args() check_copies(args.fix_and_overwrite)
392
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[Any] = logging.get_logger(__name__) lowercase : Optional[int] = { """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class A__ ( __UpperCAmelCase ): """simple docstring""" __A : List[Any] = '''vit_msn''' def __init__( self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-06 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=True , **lowercase , ) -> Dict: '''simple docstring''' super().__init__(**lowercase) a__ : Tuple = hidden_size a__ : Optional[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : Optional[Any] = intermediate_size a__ : Optional[Any] = hidden_act a__ : int = hidden_dropout_prob a__ : Optional[int] = attention_probs_dropout_prob a__ : List[Any] = initializer_range a__ : Optional[int] = layer_norm_eps a__ : List[str] = image_size a__ : Optional[int] = patch_size a__ : List[str] = num_channels a__ : Dict = qkv_bias
392
1
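For context, the checker in this record scans Python sources for marker comments and verifies that the body below each marker stays in sync with the referenced definition. A sketch of the marker shape its `_re_copy_warning` / `_re_replace_pattern` regexes match (the module path and replacement pair below are hypothetical, comments only):

# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.step with DDPM->DDIM
# ...the copied body goes here; it must match the source after applying the
# DDPM->DDIM substitution, or the script reports "copy does not match" and,
# with --fix_and_overwrite, rewrites it in place.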
import os import pytest from attr import dataclass _lowercase = """us-east-1""" # default region @dataclass class lowercase_ : __lowerCamelCase = 42 __lowerCamelCase = "arn:aws:iam::558105141721:role/sagemaker_execution_role" __lowerCamelCase = { "task_name": "mnli", "per_device_train_batch_size": 1_6, "per_device_eval_batch_size": 1_6, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 5_0_0, "save_steps": 5_5_0_0, } __lowerCamelCase = {**hyperparameters, "max_steps": 1_0_0_0} @property def _snake_case ( self ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def _snake_case ( self ) -> str: return F'{self.framework}-transformers-test' @property def _snake_case ( self ) -> str: return F'./tests/sagemaker/scripts/{self.framework}' @property def _snake_case ( self ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='''class''' ) def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Dict ) -> Tuple: SCREAMING_SNAKE_CASE_ : Union[str, Any] =SageMakerTestEnvironment(framework=request.cls.framework )
443
from __future__ import annotations def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : list[int | float] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int | float: if len(UpperCAmelCase_ ) == 0: raise ValueError('''find_max() arg is an empty sequence''' ) if ( left >= len(UpperCAmelCase_ ) or left < -len(UpperCAmelCase_ ) or right >= len(UpperCAmelCase_ ) or right < -len(UpperCAmelCase_ ) ): raise IndexError('''list index out of range''' ) if left == right: return nums[left] SCREAMING_SNAKE_CASE_ : Optional[int] =(left + right) >> 1 # the middle SCREAMING_SNAKE_CASE_ : Dict =find_max(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # find max in range[left, mid] SCREAMING_SNAKE_CASE_ : Dict =find_max(UpperCAmelCase_ , mid + 1 , UpperCAmelCase_ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
443
1
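A short trace of the divide-and-conquer maximum defined in the row above, on a concrete list (comments only; assumes the find_max from that row):

# find_max([3, 9, 4, 1], 0, 3)
#   mid = (0 + 3) >> 1 = 1
#   left  half: find_max(nums, 0, 1) -> max(3, 9) = 9
#   right half: find_max(nums, 2, 3) -> max(4, 1) = 4
#   returns 9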
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast _A = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCamelCase_ ( datasets.BuilderConfig ): _lowerCamelCase : int = 10000 _lowerCamelCase : Optional[List[str]] = None _lowerCamelCase : Optional[datasets.Features] = None class lowerCamelCase_ ( datasets.ArrowBasedBuilder ): _lowerCamelCase : Optional[Any] = ParquetConfig def __magic_name__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ): if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) a_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_SCREAMING_SNAKE_CASE , (str, list, tuple) ): a_ = data_files if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): a_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a_ = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a_ = [] for split_name, files in data_files.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): a_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a_ = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ): with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f: a_ = datasets.Features.from_arrow_schema(pq.read_schema(_SCREAMING_SNAKE_CASE ) ) break splits.append(datasets.SplitGenerator(name=_SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) ) return splits def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a_ = table_cast(_SCREAMING_SNAKE_CASE , self.info.features.arrow_schema ) return pa_table def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ): a_ = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) ): with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f: a_ = pq.ParquetFile(_SCREAMING_SNAKE_CASE ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): a_ = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(_SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(_SCREAMING_SNAKE_CASE )}: {e}""" ) raise
403
import argparse import json import subprocess def __SCREAMING_SNAKE_CASE ( UpperCamelCase : List[str] , UpperCamelCase : Tuple ) -> Dict: """simple docstring""" a_ = [] a_ = ( F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"""" """ https://api.github.com/repos/huggingface/transformers/actions/runners""" ) a_ = subprocess.run(UpperCamelCase , shell=UpperCamelCase , stdout=subprocess.PIPE ) a_ = output.stdout.decode("""utf-8""" ) a_ = json.loads(UpperCamelCase ) a_ = status["""runners"""] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(UpperCamelCase ) # save the result so we can report them on Slack with open("""offline_runners.txt""" , """w""" ) as fp: fp.write(json.dumps(UpperCamelCase ) ) if len(UpperCamelCase ) > 0: a_ = """\n""".join([x["""name"""] for x in offline_runners] ) raise ValueError(F"""The following runners are offline:\n{failed}""" ) if __name__ == "__main__": def __SCREAMING_SNAKE_CASE ( UpperCamelCase : List[str] ) -> str: """simple docstring""" return values.split(""",""" ) _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--target_runners', default=None, type=list_str, required=True, help='Comma-separated list of runners to check status.', ) parser.add_argument( '--token', default=None, type=str, required=True, help='A token that has actions:read permission.' ) _A = parser.parse_args() get_runner_status(args.target_runners, args.token)
403
1
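The builder in this record is what backs the packaged parquet loader in `datasets`; a minimal usage sketch (the file path is a placeholder):

from datasets import load_dataset

# Routes through the ArrowBasedBuilder above; the config's `columns` and
# `batch_size` fields can be passed as keyword arguments to narrow or tune reads.
ds = load_dataset("parquet", data_files={"train": "data/train.parquet"}, split="train")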
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A = logging.get_logger(__name__) A = { """shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""", # See all Dinat models at https://huggingface.co/models?filter=dinat } class a__ ( __magic_name__ , __magic_name__ ): lowercase_ = "dinat" lowercase_ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Optional[int] , UpperCamelCase_ : int=4 , UpperCamelCase_ : str=3 , UpperCamelCase_ : Optional[Any]=64 , UpperCamelCase_ : Union[str, Any]=[3, 4, 6, 5] , UpperCamelCase_ : Union[str, Any]=[2, 4, 8, 16] , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : List[str]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCamelCase_ : Tuple=3.0 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Dict=0.0 , UpperCamelCase_ : Union[str, Any]=0.0 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : List[Any]=1e-5 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : int=None , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" super().__init__(**UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = patch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : List[str] = embed_dim __UpperCAmelCase : List[Any] = depths __UpperCAmelCase : List[str] = len(UpperCamelCase_) __UpperCAmelCase : Tuple = num_heads __UpperCAmelCase : Union[str, Any] = kernel_size __UpperCAmelCase : Dict = dilations __UpperCAmelCase : Optional[int] = mlp_ratio __UpperCAmelCase : Tuple = qkv_bias __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : Tuple = attention_probs_dropout_prob __UpperCAmelCase : str = drop_path_rate __UpperCAmelCase : int = hidden_act __UpperCAmelCase : Dict = layer_norm_eps __UpperCAmelCase : List[Any] = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase : Dict = int(embed_dim * 2 ** (len(UpperCamelCase_) - 1)) __UpperCAmelCase : Dict = layer_scale_init_value __UpperCAmelCase : int = ["stem"] + [F"stage{idx}" for idx in range(1 , len(UpperCamelCase_) + 1)] __UpperCAmelCase , __UpperCAmelCase : str = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names)
77
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 20 lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase ) # tweak scores to not be uniform anymore lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create ramp distribution lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCamelCase__ = 5 lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 ) lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy() lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = None lowerCamelCase__ = 10 lowerCamelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) 
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCamelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) # check that min length is applied at length 5 lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 ) lowerCamelCase__ = 5 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 15 lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 ) lowerCamelCase__ = 1 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 20 lowerCamelCase__ = 4 lowerCamelCase__ = 0 lowerCamelCase__ = 5 lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 ) lowerCamelCase__ = 4 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) 
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCamelCase__ = 3 lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # with processor list lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 4 lowerCamelCase__ = 10 lowerCamelCase__ = 15 lowerCamelCase__ = 2 lowerCamelCase__ = 1 lowerCamelCase__ = 15 # dummy input_ids and scores lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase ) lowerCamelCase__ = input_ids.copy() lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scores.copy() # instantiate all dist processors lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCamelCase__ = FlaxTopKLogitsWarper(3 ) lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase ) lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ) lowerCamelCase__ = 10 # no processor list def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) 
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores # with processor list def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) return scores lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jax.jit(_lowerCAmelCase ) lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
50
0
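The Flax tests in this record repeatedly build and chain logits processors; a condensed sketch of that pattern outside a test (shapes and values are illustrative):

import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

# The list applies each processor in order to (input_ids, scores) at cur_len.
processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(top_k=3)]
)
input_ids = jnp.zeros((2, 4), dtype=jnp.int32)
scores = jnp.ones((2, 10)) / 10  # uniform logits, like _get_uniform_logits above
warped = processors(input_ids, scores, cur_len=4)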
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class _a ( unittest.TestCase ): """simple docstring""" def A_ ( self : Tuple , a : int , a : int ) ->Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.ones((batch_size, length) ) / length return scores def A_ ( self : Tuple ) ->List[str]: SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : Tuple = 20 SCREAMING_SNAKE_CASE__ : Optional[int] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase_ ) # tweak scores to not be uniform anymore SCREAMING_SNAKE_CASE__ : Union[str, Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch SCREAMING_SNAKE_CASE__ : Optional[int] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax SCREAMING_SNAKE_CASE__ : Optional[Any] = jax.nn.softmax(lowerCAmelCase_ , axis=-1 ) SCREAMING_SNAKE_CASE__ : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 ) SCREAMING_SNAKE_CASE__ : str = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase_ , scores.copy() , cur_len=lowerCAmelCase_ ) , axis=-1 ) SCREAMING_SNAKE_CASE__ : Any = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase_ , scores.copy() , cur_len=lowerCAmelCase_ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def A_ ( self : List[Any] ) ->Optional[Any]: SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 # create ramp distribution SCREAMING_SNAKE_CASE__ : List[str] = np.broadcast_to(np.arange(lowerCAmelCase_ )[None, :] , (batch_size, vocab_size) ).copy() SCREAMING_SNAKE_CASE__ : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size SCREAMING_SNAKE_CASE__ : str = FlaxTopKLogitsWarper(3 ) SCREAMING_SNAKE_CASE__ : str = top_k_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case SCREAMING_SNAKE_CASE__ : Tuple = 5 SCREAMING_SNAKE_CASE__ : int = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) SCREAMING_SNAKE_CASE__ : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase_ )[None, :] , (batch_size, length) ).copy() SCREAMING_SNAKE_CASE__ : Union[str, Any] = top_k_warp_safety_check(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are 
nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def A_ ( self : int ) ->Dict: SCREAMING_SNAKE_CASE__ : List[Any] = None SCREAMING_SNAKE_CASE__ : Tuple = 10 SCREAMING_SNAKE_CASE__ : str = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) SCREAMING_SNAKE_CASE__ : Dict = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) SCREAMING_SNAKE_CASE__ : int = FlaxTopPLogitsWarper(0.8 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.exp(top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 SCREAMING_SNAKE_CASE__ : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # check edge cases with negative and extreme logits SCREAMING_SNAKE_CASE__ : Dict = np.broadcast_to(np.arange(lowerCAmelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme SCREAMING_SNAKE_CASE__ : Union[str, Any] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept SCREAMING_SNAKE_CASE__ : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) SCREAMING_SNAKE_CASE__ : Dict = top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def A_ ( self : Dict ) ->Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[str] = 20 SCREAMING_SNAKE_CASE__ : str = 4 SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 SCREAMING_SNAKE_CASE__ : Any = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase_ ) # check that min length is applied at length 5 SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor((batch_size, 20) , vocab_size=20 ) SCREAMING_SNAKE_CASE__ : List[Any] = 5 SCREAMING_SNAKE_CASE__ : List[str] = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = min_dist_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] ) # check that min length is not applied anymore at length 15 SCREAMING_SNAKE_CASE__ : List[str] = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = 15 SCREAMING_SNAKE_CASE__ : int = min_dist_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertFalse(jnp.isinf(lowerCAmelCase_ ).any() ) def A_ ( self : str ) ->List[Any]: SCREAMING_SNAKE_CASE__ : Tuple = 20 SCREAMING_SNAKE_CASE__ : Tuple = 4 SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase_ ) # check that all scores are -inf except the bos_token_id score SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor((batch_size, 1) , vocab_size=20 ) SCREAMING_SNAKE_CASE__ : str = 1 SCREAMING_SNAKE_CASE__ : Tuple = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Tuple = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero # check that bos_token_id is not forced if current length is 
greater than 1 SCREAMING_SNAKE_CASE__ : Tuple = 3 SCREAMING_SNAKE_CASE__ : Dict = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Dict = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertFalse(jnp.isinf(lowerCAmelCase_ ).any() ) def A_ ( self : Optional[Any] ) ->List[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 20 SCREAMING_SNAKE_CASE__ : Dict = 4 SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : Dict = 5 SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) # check that all scores are -inf except the eos_token_id when max_length is reached SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=20 ) SCREAMING_SNAKE_CASE__ : List[Any] = 4 SCREAMING_SNAKE_CASE__ : Dict = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached SCREAMING_SNAKE_CASE__ : str = 3 SCREAMING_SNAKE_CASE__ : List[str] = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertFalse(jnp.isinf(lowerCAmelCase_ ).any() ) def A_ ( self : List[Any] ) ->Optional[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4 SCREAMING_SNAKE_CASE__ : Optional[Any] = 10 SCREAMING_SNAKE_CASE__ : str = 15 SCREAMING_SNAKE_CASE__ : int = 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 SCREAMING_SNAKE_CASE__ : Any = 15 # dummy input_ids and scores SCREAMING_SNAKE_CASE__ : str = ids_tensor((batch_size, sequence_length) , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = input_ids.copy() SCREAMING_SNAKE_CASE__ : Tuple = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = scores.copy() # instantiate all dist processors SCREAMING_SNAKE_CASE__ : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 ) SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxTopKLogitsWarper(3 ) SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors SCREAMING_SNAKE_CASE__ : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = 10 # no processor list SCREAMING_SNAKE_CASE__ : List[Any] = temp_dist_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Dict = top_k_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Tuple = top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = min_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Tuple = bos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = eos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # with 
processor list SCREAMING_SNAKE_CASE__ : int = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def A_ ( self : int ) ->List[Any]: SCREAMING_SNAKE_CASE__ : str = 4 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10 SCREAMING_SNAKE_CASE__ : List[Any] = 15 SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 15 # dummy input_ids and scores SCREAMING_SNAKE_CASE__ : Any = ids_tensor((batch_size, sequence_length) , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = input_ids.copy() SCREAMING_SNAKE_CASE__ : int = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[str] = scores.copy() # instantiate all dist processors SCREAMING_SNAKE_CASE__ : Any = FlaxTemperatureLogitsWarper(temperature=0.5 ) SCREAMING_SNAKE_CASE__ : Any = FlaxTopKLogitsWarper(3 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors SCREAMING_SNAKE_CASE__ : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Dict = 10 # no processor list def run_no_processor_list(a : int , a : Dict , a : List[Any] ): SCREAMING_SNAKE_CASE__ : Any = temp_dist_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : List[Any] = top_k_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Tuple = top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : str = min_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = bos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Tuple = eos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) return scores # with processor list def run_processor_list(a : Union[str, Any] , a : int , a : Optional[Any] ): SCREAMING_SNAKE_CASE__ : Tuple = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) SCREAMING_SNAKE_CASE__ : Optional[int] = processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) return scores SCREAMING_SNAKE_CASE__ : List[Any] = jax.jit(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jax.jit(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : str = jitted_run_no_processor_list(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = jitted_run_processor_list(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
code_codestyle: 711
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1] by divide and conquer.

    >>> find_max([1, 3, 5, 9, 2], 0, 4)
    9
    >>> find_max([], 0, 0)
    Traceback (most recent call last):
        ...
    ValueError: find_max() arg is an empty sequence
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
style_context_codestyle: 26
label: 0
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets UpperCAmelCase : Dict = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' UpperCAmelCase : List[Any] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n' UpperCAmelCase : int = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : str ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ): raise ImportWarning( """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n""" """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[ """https://github.com/jhclark/tercom""", ] , ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) __UpperCAmelCase : Any = [[refs[i] for refs in references] for i in range(UpperCamelCase )] __UpperCAmelCase : List[str] = TER( normalized=UpperCamelCase , no_punct=UpperCamelCase , asian_support=UpperCamelCase , case_sensitive=UpperCamelCase , ) __UpperCAmelCase : Tuple = sb_ter.corpus_score(UpperCamelCase , UpperCamelCase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
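A minimal standalone sketch of what the wrapper above delegates to: sacrebleu's TER scores hypothesis segments against transposed reference streams. The data reuses Example 2 from the docstring, whose documented output is score 62.5, 5 edits, ref_length 8.0.

from sacrebleu import TER

predictions = ["does this sentence match??", "what about this sentence?"]
references = [
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]

# sacrebleu wants one stream per reference *position*, i.e. the transpose
# of the per-prediction reference lists used by the wrapper
transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]

sb_ter = TER(case_sensitive=True)
output = sb_ter.corpus_score(predictions, transposed)
print(output.score, output.num_edits, output.ref_length)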
code_codestyle: 139
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets UpperCAmelCase : Dict = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' UpperCAmelCase : List[Any] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n' UpperCAmelCase : int = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : str ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ): raise ImportWarning( """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n""" """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[ """https://github.com/jhclark/tercom""", ] , ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) __UpperCAmelCase : Any = [[refs[i] for refs in references] for i in range(UpperCamelCase )] __UpperCAmelCase : List[str] = TER( normalized=UpperCamelCase , no_punct=UpperCamelCase , asian_support=UpperCamelCase , case_sensitive=UpperCamelCase , ) __UpperCAmelCase : Tuple = sb_ter.corpus_score(UpperCamelCase , UpperCamelCase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
style_context_codestyle: 139
label: 1
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Any ={ 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class __A ( UpperCamelCase__ ): a__ : List[str] = """pix2struct_text_model""" a__ : Optional[Any] = ["""past_key_values"""] a__ : Tuple = { """hidden_size""": """hidden_size""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__(self : List[Any] , __a : Optional[Any]=50244 , __a : List[str]=768 , __a : Union[str, Any]=64 , __a : Tuple=2048 , __a : Union[str, Any]=12 , __a : Dict=12 , __a : Dict=32 , __a : Tuple=128 , __a : Optional[Any]=0.1 , __a : int=1E-6 , __a : Any=1.0 , __a : Any="gelu_new" , __a : List[Any]=0 , __a : Any=False , __a : Union[str, Any]=0 , __a : Tuple=1 , __a : List[Any]=False , __a : Union[str, Any]=True , **__a : Union[str, Any] , ): UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = d_kv UpperCAmelCase_ = d_ff UpperCAmelCase_ = num_layers UpperCAmelCase_ = num_heads UpperCAmelCase_ = relative_attention_num_buckets UpperCAmelCase_ = relative_attention_max_distance UpperCAmelCase_ = dropout_rate UpperCAmelCase_ = layer_norm_epsilon UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = use_cache UpperCAmelCase_ = eos_token_id UpperCAmelCase_ = decoder_start_token_id # for backwards compatibility UpperCAmelCase_ = dense_act_fn super().__init__( pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , tie_word_embeddings=__a , is_decoder=__a , **__a , ) @classmethod def _lowercase (cls : List[str] , __a : Union[str, os.PathLike] , **__a : List[str] ): cls._set_token_in_kwargs(__a ) UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(__a , **__a ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": UpperCAmelCase_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__a , **__a ) class __A ( UpperCamelCase__ ): a__ : Any = """pix2struct_vision_model""" def __init__(self : List[Any] , __a : Dict=768 , __a : Tuple=768 , __a : str=2048 , __a : List[Any]=64 , __a : Optional[int]=12 , __a : Optional[Any]=12 , __a : Union[str, Any]="gelu_new" , __a : Union[str, Any]=1E-6 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Dict=1E-10 , __a : List[str]=1.0 , __a : Union[str, Any]=4096 , __a : Tuple=32 , __a : Any=128 , **__a : int , ): super().__init__(**__a ) UpperCAmelCase_ = hidden_size UpperCAmelCase_ = patch_embed_hidden_size UpperCAmelCase_ = d_ff UpperCAmelCase_ = dropout_rate UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = initializer_range UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = dense_act_fn UpperCAmelCase_ = seq_len UpperCAmelCase_ = relative_attention_num_buckets UpperCAmelCase_ = relative_attention_max_distance UpperCAmelCase_ = d_kv @classmethod def _lowercase (cls : List[Any] , __a : Union[str, os.PathLike] , **__a : Optional[int] ): cls._set_token_in_kwargs(__a ) UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(__a , **__a ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": UpperCAmelCase_ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__a , **__a ) class __A ( UpperCamelCase__ ): a__ : Union[str, Any] = """pix2struct""" a__ : Optional[int] = True def __init__(self : Dict , __a : Optional[int]=None , __a : Dict=None , __a : Optional[int]=1.0 , __a : int=0.02 , __a : Any=False , __a : Optional[Any]=False , __a : int=True , **__a : List[str] , ): super().__init__(tie_word_embeddings=__a , is_encoder_decoder=__a , **__a ) if text_config is None: UpperCAmelCase_ = {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: UpperCAmelCase_ = {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." ) UpperCAmelCase_ = PixaStructTextConfig(**__a ) UpperCAmelCase_ = PixaStructVisionConfig(**__a ) UpperCAmelCase_ = self.text_config.decoder_start_token_id UpperCAmelCase_ = self.text_config.pad_token_id UpperCAmelCase_ = self.text_config.eos_token_id UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = initializer_range UpperCAmelCase_ = self.initializer_range UpperCAmelCase_ = self.initializer_range UpperCAmelCase_ = is_vqa @classmethod def _lowercase (cls : Dict , __a : PixaStructTextConfig , __a : PixaStructVisionConfig , **__a : int ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ = self.text_config.to_dict() UpperCAmelCase_ = self.vision_config.to_dict() UpperCAmelCase_ = self.__class__.model_type return output
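A minimal sketch of composing the composite config from its two sub-configs via the classmethod defined above. The public class names come from the transformers API; the tiny layer/head counts are illustrative assumptions.

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=4)  # tiny, illustrative sizes
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=4)

config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
# the composite mirrors the decoder_start/pad/eos token ids of its text config
print(config.decoder_start_token_id, config.pad_token_id, config.eos_token_id)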
code_codestyle: 415
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
style_context_codestyle: 415
label: 1
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip A = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def __A ( a_ :Optional[Any]) -> Optional[Any]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def __A ( a_ :Optional[int] , a_ :Optional[int] , a_ :Optional[Any]) -> int: return max(metric_fn(a_ , a_) for gt in ground_truths) def __A ( a_ :List[str] , a_ :List[str] , a_ :Optional[Any]) -> Dict: __a : List[Any] = [line.strip() for line in open(a_ , '''r''').readlines()] __a : Dict = [] if args.gold_data_mode == "qa": __a : Dict = pd.read_csv(a_ , sep='''\t''' , header=a_) for answer_list in data[1]: __a : List[str] = ast.literal_eval(a_) answers.append(a_) else: __a : Union[str, Any] = [line.strip() for line in open(a_ , '''r''').readlines()] __a : Union[str, Any] = [[reference] for reference in references] __a : Dict = 0 for prediction, ground_truths in zip(a_ , a_): total += 1 em += metric_max_over_ground_truths(a_ , a_ , a_) fa += metric_max_over_ground_truths(a_ , a_ , a_) __a : List[str] = 1_0_0.0 * em / total __a : List[str] = 1_0_0.0 * fa / total logger.info(F"""F1: {fa:.2f}""") logger.info(F"""EM: {em:.2f}""") def __A ( a_ :Union[str, Any] , a_ :List[Any] , a_ :Optional[Any]) -> Optional[int]: __a : Optional[int] = args.k __a : Optional[int] = [line.strip() for line in open(a_ , '''r''').readlines()] __a : Optional[Any] = [line.strip() for line in open(a_ , '''r''').readlines()] __a : Optional[int] = 0 for hypo, reference in zip(a_ , a_): __a : List[str] = set(hypo.split('''\t''')[:k]) __a : Dict = set(reference.split('''\t''')) total += 1 em += len(hypo_provenance & ref_provenance) / k __a : Tuple = 1_0_0.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""") def __A ( a_ :Dict , a_ :Any , a_ :int) -> int: def strip_title(a_ :List[str]): if title.startswith('''"'''): __a : str = title[1:] if title.endswith('''"'''): __a : Dict = title[:-1] return title __a : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device) __a : Optional[Any] = rag_model.rag.question_encoder(a_) __a : Dict = question_enc_outputs[0] __a : List[str] = rag_model.retriever( a_ , question_enc_pool_output.cpu().detach().to(torch.floataa).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , ) __a : Optional[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids) __a : Dict = [] for docs in all_docs: __a : Any = [strip_title(a_) for title in docs['''title''']] provenance_strings.append('''\t'''.join(a_)) return provenance_strings def __A ( a_ :Optional[Any] , a_ :str , a_ :str) -> List[str]: with torch.no_grad(): __a : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( a_ , return_tensors='''pt''' , padding=a_ , truncation=a_) __a : Optional[int] = inputs_dict.input_ids.to(args.device) __a : List[Any] = 
inputs_dict.attention_mask.to(args.device) __a : List[Any] = rag_model.generate( # rag_model overwrites generate a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __a : Optional[Any] = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_) if args.print_predictions: for q, a in zip(a_ , a_): logger.info('''Q: {} - A: {}'''.format(a_ , a_)) return answers def __A ( ) -> Union[str, Any]: __a : Dict = argparse.ArgumentParser() parser.add_argument( '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=( '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the''' ''' model_name_or_path''' ) , ) parser.add_argument( '''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , ) parser.add_argument( '''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , ) parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''') parser.add_argument( '''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=( '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates''' ''' precision@k.''' ) , ) parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''') parser.add_argument( '''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , ) parser.add_argument( '''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , ) parser.add_argument( '''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=( '''Format of the gold data file''' '''qa - a single line in the following format: question [tab] answer_list''' '''ans - a single line of the gold file contains the expected answer string''' ) , ) parser.add_argument( '''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , ) parser.add_argument( '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , ) parser.add_argument( '''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , ) parser.add_argument( '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , ) parser.add_argument( '''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , ) parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''') parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''') parser.add_argument( '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , ) parser.add_argument( '''--print_docs''' , action='''store_true''' , help='''If True, prints 
docs retried while generating.''' , ) __a : Optional[Any] = parser.parse_args() __a : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''') return args def __A ( a_ :Optional[int]) -> str: __a : Any = {} if args.model_type is None: __a : List[Any] = infer_model_type(args.model_name_or_path) assert args.model_type is not None if args.model_type.startswith('''rag'''): __a : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration __a : Optional[int] = args.n_docs if args.index_name is not None: __a : Union[str, Any] = args.index_name if args.index_path is not None: __a : Tuple = args.index_path else: __a : Tuple = BartForConditionalGeneration __a : str = ( [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('''Evaluate the following checkpoints: %s''' , a_) __a : Optional[int] = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k __a : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path) and (not args.recalculate): logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path)) score_fn(a_ , args.predictions_path , args.gold_data_path) continue logger.info('''***** Running evaluation for {} *****'''.format(a_)) logger.info(''' Batch size = %d''' , args.eval_batch_size) logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path)) if args.model_type.startswith('''rag'''): __a : Optional[int] = RagRetriever.from_pretrained(a_ , **a_) __a : Tuple = model_class.from_pretrained(a_ , retriever=a_ , **a_) model.retriever.init_retrieval() else: __a : Dict = model_class.from_pretrained(a_ , **a_) model.to(args.device) with open(args.evaluation_set , '''r''') as eval_file, open(args.predictions_path , '''w''') as preds_file: __a : Any = [] for line in tqdm(a_): questions.append(line.strip()) if len(a_) == args.eval_batch_size: __a : Tuple = evaluate_batch_fn(a_ , a_ , a_) preds_file.write('''\n'''.join(a_) + '''\n''') preds_file.flush() __a : Dict = [] if len(a_) > 0: __a : Optional[Any] = evaluate_batch_fn(a_ , a_ , a_) preds_file.write('''\n'''.join(a_)) preds_file.flush() score_fn(a_ , args.predictions_path , args.gold_data_path) if __name__ == "__main__": A = get_args() main(args)
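A self-contained sketch of the precision@k computation performed by get_precision_at_k above: each line carries tab-separated document titles, and the score averages the fraction of the top-k retrieved titles that appear in the gold provenance. The toy strings are assumptions.

def precision_at_k(hypos, references, k):
    em, total = 0.0, 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])  # top-k retrieved titles
        ref_provenance = set(reference.split("\t"))  # gold titles
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    return 100.0 * em / total


print(precision_at_k(["a\tb\tc"], ["a\tc\td"], k=2))  # 50.0: one of the top-2 is gold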
code_codestyle: 52
"""simple docstring""" from __future__ import annotations class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase ): __a , __a : List[Any] = text, pattern __a , __a : Tuple = len(_UpperCAmelCase ), len(_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def _lowerCamelCase ( self , _UpperCAmelCase ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def _lowerCamelCase ( self ): # searches pattern in text and returns index positions __a : Dict = [] for i in range(self.textLen - self.patLen + 1 ): __a : List[str] = self.mismatch_in_text(_UpperCAmelCase ) if mismatch_index == -1: positions.append(_UpperCAmelCase ) else: __a : Tuple = self.match_in_pattern(self.text[mismatch_index] ) __a : Optional[int] = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions A = '''ABAABA''' A = '''AB''' A = BoyerMooreSearch(text, pattern) A = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
style_context_codestyle: 52
label: 1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
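A short usage note, as an assumed illustration rather than part of the source: once _LazyModule is installed in sys.modules, importing the package is cheap, and the heavy torch-backed classes are only imported on first attribute access.

import transformers.models.instructblip as instructblip  # cheap: only registers the structure

# first attribute access triggers the real submodule import (needs torch for modeling classes)
model_cls = instructblip.InstructBlipForConditionalGeneration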
code_codestyle: 511
'''simple docstring''' import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available lowercase__ =logging.getLogger(__name__) @dataclass class a_ : lowerCamelCase__ : str lowerCamelCase__ : List[str] lowerCamelCase__ : Optional[List[str]] @dataclass class a_ : lowerCamelCase__ : List[int] lowerCamelCase__ : List[int] lowerCamelCase__ : Optional[List[int]] = None lowerCamelCase__ : Optional[List[int]] = None class a_ ( UpperCamelCase__ ): lowerCamelCase__ : Any = 'train' lowerCamelCase__ : Optional[int] = 'dev' lowerCamelCase__ : int = 'test' class a_ : @staticmethod def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ): raise NotImplementedError @staticmethod def lowerCAmelCase__ ( UpperCAmelCase ): raise NotImplementedError @staticmethod def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase="[CLS]" , UpperCAmelCase=1 , UpperCAmelCase="[SEP]" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=-1_00 , UpperCAmelCase=0 , UpperCAmelCase=True , ): a_ = {label: i for i, label in enumerate(UpperCAmelCase )} a_ = [] for ex_index, example in enumerate(UpperCAmelCase ): if ex_index % 1_00_00 == 0: logger.info("""Writing example %d of %d""" , UpperCAmelCase , len(UpperCAmelCase ) ) a_ = [] a_ = [] for word, label in zip(example.words , example.labels ): a_ = tokenizer.tokenize(UpperCAmelCase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(UpperCAmelCase ) > 0: tokens.extend(UpperCAmelCase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(UpperCAmelCase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. a_ = tokenizer.num_special_tokens_to_add() if len(UpperCAmelCase ) > max_seq_length - special_tokens_count: a_ = tokens[: (max_seq_length - special_tokens_count)] a_ = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] a_ = [sequence_a_segment_id] * len(UpperCAmelCase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: a_ = [cls_token] + tokens a_ = [pad_token_label_id] + label_ids a_ = [cls_token_segment_id] + segment_ids a_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. a_ = [1 if mask_padding_with_zero else 0] * len(UpperCAmelCase ) # Zero-pad up to the sequence length. a_ = max_seq_length - len(UpperCAmelCase ) if pad_on_left: a_ = ([pad_token] * padding_length) + input_ids a_ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask a_ = ([pad_token_segment_id] * padding_length) + segment_ids a_ = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(UpperCAmelCase ) == max_seq_length assert len(UpperCAmelCase ) == max_seq_length assert len(UpperCAmelCase ) == max_seq_length assert len(UpperCAmelCase ) == max_seq_length if ex_index < 5: logger.info("""*** Example ***""" ) logger.info("""guid: %s""" , example.guid ) logger.info("""tokens: %s""" , """ """.join([str(UpperCAmelCase ) for x in tokens] ) ) logger.info("""input_ids: %s""" , """ """.join([str(UpperCAmelCase ) for x in input_ids] ) ) logger.info("""input_mask: %s""" , """ """.join([str(UpperCAmelCase ) for x in input_mask] ) ) logger.info("""segment_ids: %s""" , """ """.join([str(UpperCAmelCase ) for x in segment_ids] ) ) logger.info("""label_ids: %s""" , """ """.join([str(UpperCAmelCase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: a_ = None features.append( InputFeatures( input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , label_ids=UpperCAmelCase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class a_ ( UpperCamelCase__ ): lowerCamelCase__ : List[InputFeatures] lowerCamelCase__ : int = nn.CrossEntropyLoss().ignore_index def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase=False , UpperCAmelCase = Split.train , ): # Load data features from cache or dataset file a_ = os.path.join( UpperCAmelCase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(UpperCAmelCase ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
a_ = cached_features_file + """.lock""" with FileLock(UpperCAmelCase ): if os.path.exists(UpperCAmelCase ) and not overwrite_cache: logger.info(f'''Loading features from cached file {cached_features_file}''' ) a_ = torch.load(UpperCAmelCase ) else: logger.info(f'''Creating features from dataset file at {data_dir}''' ) a_ = token_classification_task.read_examples_from_file(UpperCAmelCase , UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers a_ = token_classification_task.convert_examples_to_features( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f'''Saving features into cached file {cached_features_file}''' ) torch.save(self.features , UpperCAmelCase ) def __len__( self ): return len(self.features ) def __getitem__( self , UpperCAmelCase ): return self.features[i] if is_tf_available(): import tensorflow as tf class a_ : lowerCamelCase__ : List[InputFeatures] lowerCamelCase__ : int = -100 def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase=False , UpperCAmelCase = Split.train , ): a_ = token_classification_task.read_examples_from_file(UpperCAmelCase , UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers a_ = token_classification_task.convert_examples_to_features( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: a_ = tf.data.Dataset.from_generator( UpperCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , ( {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: a_ = tf.data.Dataset.from_generator( UpperCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , ( { """input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] ), """token_type_ids""": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def lowerCAmelCase__ ( self ): a_ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self ): return len(self.features ) def __getitem__( self , UpperCAmelCase ): return self.features[i]
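A minimal sketch of the label-alignment rule implemented in convert_examples_to_features above: the first sub-token of each word keeps the word's label id, while continuation sub-tokens receive pad_token_label_id (-100) so the loss ignores them. The toy tokenizer below is an assumption for illustration.

PAD_TOKEN_LABEL_ID = -100


def align_labels(words, labels, label_map, tokenize):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        word_tokens = tokenize(word)
        if word_tokens:  # some tokenizers return [] for e.g. a bare space
            tokens.extend(word_tokens)
            label_ids.extend([label_map[label]] + [PAD_TOKEN_LABEL_ID] * (len(word_tokens) - 1))
    return tokens, label_ids


toy_tokenize = lambda w: [w[:3], "##" + w[3:]] if len(w) > 3 else [w]
print(align_labels(["Hugging", "Face"], ["B-ORG", "I-ORG"], {"B-ORG": 0, "I-ORG": 1}, toy_tokenize))
# (['Hug', '##ging', 'Fac', '##e'], [0, -100, 1, -100])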
style_context_codestyle: 511
label: 1
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def _SCREAMING_SNAKE_CASE ( __snake_case : str ): _A = int(A__ ) _A , _A , _A = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0 return F'{h}:{m:02d}:{s:02d}' if h != 0 else F'{m:02d}:{s:02d}' def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Tuple , __snake_case : Union[str, Any]=3_0_0 ): # docstyle-ignore return F'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n ' def _SCREAMING_SNAKE_CASE ( __snake_case : str ): _A = '<table border=\"1\" class=\"dataframe\">\n' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F' <th>{i}</th>\n' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: _A = F'{elt:.6f}' if isinstance(A__ , A__ ) else str(A__ ) html_code += F' <td>{elt}</td>\n' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class lowercase_ : """simple docstring""" __lowerCAmelCase = 5 __lowerCAmelCase = 0.2 def __init__( self : Optional[int], UpperCamelCase__ : int, UpperCamelCase__ : Optional[str] = None, UpperCamelCase__ : bool = True, UpperCamelCase__ : Optional["NotebookTrainingTracker"] = None, UpperCamelCase__ : int = 3_00, ) -> List[str]: _A = total _A = '' if prefix is None else prefix _A = leave _A = parent _A = width _A = None _A = None _A = None def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : int, UpperCamelCase__ : bool = False, UpperCamelCase__ : str = None ) -> Any: _A = value if comment is not None: _A = comment if self.last_value is None: _A = _A = time.time() _A = _A = value _A = _A = None _A = self.warmup _A = 1 self.update_bar(__lowerCamelCase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total ): if self.first_calls > 0: self.first_calls -= 1 _A = time.time() _A = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: _A = self.elapsed_time / (value - self.start_value) else: _A = None if value >= self.total: _A = self.total _A = None if not self.leave: self.close() elif self.average_time_per_item is not None: _A = self.average_time_per_item * (self.total - value) self.update_bar(__lowerCamelCase ) _A = value _A = current_time if self.average_time_per_item is None: _A = 1 else: _A = max(int(self.update_every / self.average_time_per_item ), 1 ) def __UpperCAmelCase ( self : str, UpperCamelCase__ : List[Any], UpperCamelCase__ : List[Any]=None ) -> Any: _A = ' ' * (len(str(self.total ) ) - len(str(__lowerCamelCase ) )) + str(__lowerCamelCase ) if self.elapsed_time is None: _A = f'[{spaced_value}/{self.total} : < :' elif self.predicted_remaining is None: _A = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}' else: _A = ( f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <' f' {format_time(self.predicted_remaining )}' ) self.label += f', {1/self.average_time_per_item:.2f} it/s' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f', {self.comment}]' self.display() def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]: _A = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: _A = disp.display(disp.HTML(self.html_code ), display_id=__lowerCamelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: if self.parent is None and self.output is not None: self.output.update(disp.HTML('' ) ) class lowercase_ ( __snake_case ): """simple docstring""" def __init__( self : int, UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[str]=None ) -> Optional[int]: super().__init__(__lowerCamelCase ) _A = None if column_names is None else [column_names] _A = None def __UpperCAmelCase ( self : Any ) -> Any: _A = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: _A = disp.display(disp.HTML(self.html_code ), display_id=__lowerCamelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : Optional[Any] ) -> str: if self.inner_table is None: _A = [list(values.keys() ), list(values.values() )] else: _A = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__lowerCamelCase ) _A = columns self.inner_table.append([values[c] for c in columns] ) def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : Any, UpperCamelCase__ : List[str]=None, UpperCamelCase__ : str=3_00 ) -> Optional[Any]: _A = NotebookProgressBar(__lowerCamelCase, prefix=__lowerCamelCase, parent=self, width=__lowerCamelCase ) return self.child_bar def __UpperCAmelCase ( self : Tuple ) -> Any: _A = None self.display() class lowercase_ ( __snake_case ): """simple docstring""" def __init__( self : Dict ) -> List[Any]: _A = None _A = None _A = False def __UpperCAmelCase ( self : Optional[int], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : str, **UpperCamelCase__ : Tuple ) -> str: _A = 'Epoch' if 
args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step' _A = 0 _A = 0 _A = [self.first_column] + ['Training Loss'] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('Validation Loss' ) _A = NotebookTrainingTracker(state.max_steps, __lowerCamelCase ) def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Tuple, **UpperCamelCase__ : List[str] ) -> str: _A = int(state.epoch ) if int(state.epoch ) == state.epoch else f'{state.epoch:.2f}' self.training_tracker.update( state.global_step + 1, comment=f'Epoch {epoch}/{state.num_train_epochs}', force_update=self._force_next_update, ) _A = False def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int, UpperCamelCase__ : List[str], UpperCamelCase__ : int, UpperCamelCase__ : Any=None, **UpperCamelCase__ : int ) -> str: if not has_length(__lowerCamelCase ): return if self.prediction_bar is None: if self.training_tracker is not None: _A = self.training_tracker.add_child(len(__lowerCamelCase ) ) else: _A = NotebookProgressBar(len(__lowerCamelCase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[str], **UpperCamelCase__ : List[Any] ) -> Any: if self.prediction_bar is not None: self.prediction_bar.close() _A = None def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : int, UpperCamelCase__ : Tuple, UpperCamelCase__ : Dict=None, **UpperCamelCase__ : Dict ) -> int: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: _A = {'Training Loss': logs['loss']} # First column is necessarily Step sine we're not in epoch eval strategy _A = state.global_step self.training_tracker.write_line(__lowerCamelCase ) def __UpperCAmelCase ( self : str, UpperCamelCase__ : str, UpperCamelCase__ : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Optional[int]=None, **UpperCamelCase__ : int ) -> List[Any]: if self.training_tracker is not None: _A = {'Training Loss': 'No log', 'Validation Loss': 'No log'} for log in reversed(state.log_history ): if "loss" in log: _A = log['loss'] break if self.first_column == "Epoch": _A = int(state.epoch ) else: _A = state.global_step _A = 'eval' for k in metrics: if k.endswith('_loss' ): _A = re.sub(r'\_loss$', '', __lowerCamelCase ) _A = metrics.pop('total_flos', __lowerCamelCase ) _A = metrics.pop('epoch', __lowerCamelCase ) _A = metrics.pop(f'{metric_key_prefix}_runtime', __lowerCamelCase ) _A = metrics.pop(f'{metric_key_prefix}_samples_per_second', __lowerCamelCase ) _A = metrics.pop(f'{metric_key_prefix}_steps_per_second', __lowerCamelCase ) _A = metrics.pop(f'{metric_key_prefix}_jit_compilation_time', __lowerCamelCase ) for k, v in metrics.items(): if k == f'{metric_key_prefix}_loss': _A = v else: _A = k.split('_' ) _A = ' '.join([part.capitalize() for part in splits[1:]] ) _A = v self.training_tracker.write_line(__lowerCamelCase ) self.training_tracker.remove_child() _A = None # Evaluation takes a long time so we should force the next update. _A = True def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Dict, UpperCamelCase__ : Dict, **UpperCamelCase__ : Any ) -> Dict: self.training_tracker.update( state.global_step, comment=f'Epoch {int(state.epoch )}/{state.num_train_epochs}', force_update=__lowerCamelCase ) _A = None
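A cleaned-up sketch of the format_time helper the progress widgets above rely on: elapsed and remaining seconds render as h:mm:ss, or mm:ss when under an hour.

def format_time(t: float) -> str:
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


print(format_time(3661))  # 1:01:01
print(format_time(75))    # 01:15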
code_codestyle: 107
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
16
0
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowercase : Optional[Any] = logging.get_logger(__name__) class __lowercase ( UpperCAmelCase__ ): """simple docstring""" UpperCAmelCase_ : List[str] = ['''input_features'''] def __init__( self , __UpperCAmelCase=80 , __UpperCAmelCase=1_60_00 , __UpperCAmelCase=1_60 , __UpperCAmelCase=30 , __UpperCAmelCase=4_00 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> str: super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) A : Dict = n_fft A : int = hop_length A : Optional[int] = chunk_length A : Any = chunk_length * sampling_rate A : List[str] = self.n_samples // hop_length A : Any = sampling_rate A : Optional[Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ) def snake_case ( self , __UpperCAmelCase ) -> np.ndarray: A : List[Any] = spectrogram( __lowerCAmelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) A : Any = log_spec[:, :-1] A : int = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0 ) A : int = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def snake_case ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: A : Tuple = np.array(__lowerCAmelCase , np.intaa ) A : Union[str, Any] = [] for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1 ) ): A : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: A : List[Any] = padding_value normed_input_values.append(__lowerCAmelCase ) else: A : List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "max_length" , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[str] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A : List[str] = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): A : int = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [np.asarray([raw_speech] ).T] A : str = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding A : Dict = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: A : Any = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) A : Optional[int] = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) A : int = [self._np_extract_fbank_features(__lowerCAmelCase ) for waveform in input_features[0]] if isinstance(input_features[0] , __lowerCAmelCase ): A : Any = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features] else: A : Optional[int] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) A : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: A : Tuple = padded_inputs.convert_to_tensors(__lowerCAmelCase ) return padded_inputs def snake_case ( self ) -> Dict[str, Any]: A : Dict = copy.deepcopy(self.__dict__ ) A : Union[str, Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
710
import heapq import sys import numpy as np lowercase : Optional[int] = tuple[int, int] class __lowercase : """simple docstring""" def __init__( self ) -> List[str]: A : List[str] = [] A : str = set() def snake_case ( self ) -> str: if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case ( self ) -> Dict: return len(self.elements ) == 0 def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__UpperCAmelCase ) else: # update # print("update", item) A : int = [] ((A) , (A)) : Tuple = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((A) , (A)) : Any = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case ( self , __UpperCAmelCase ) -> Dict: if item in self.set: self.set.remove(__UpperCAmelCase ) A : str = [] ((A) , (A)) : List[Any] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((A) , (A)) : Optional[Any] = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case ( self ) -> List[str]: return self.elements[0][1] def snake_case ( self ) -> Optional[int]: ((A) , (A)) : int = heapq.heappop(self.elements ) self.set.remove(__UpperCAmelCase ) return (priority, item) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): # euclidean distance A : int = np.array(lowerCamelCase_ ) A : Tuple = np.array(lowerCamelCase_ ) return np.linalg.norm(a - b ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): # integer division by time variable return consistent_heuristic(lowerCamelCase_ , lowerCamelCase_ ) // t def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): A : List[Any] = g_function[start] + Wa * heuristics[i](lowerCamelCase_ , lowerCamelCase_ ) return ans def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): A : Union[str, Any] = np.chararray((n, n) ) for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): A : List[str] = '''*''' for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): if (j, (n - 1) - i) in blocks: A : List[Any] = '''#''' A : Tuple = '''-''' A : Optional[Any] = back_pointer[goal] while x != start: ((A) , (A)) : Union[str, Any] = x # print(x) A : str = '''-''' A : Union[str, Any] = back_pointer[x] A : Union[str, Any] = '''-''' for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A : Any = back_pointer[goal] while x != start: print(lowerCamelCase_ , end=''' ''' ) A : List[Any] = back_pointer[x] print(lowerCamelCase_ ) sys.exit() def snake_case__ ( lowerCamelCase_ ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ): for itera in range(lowerCamelCase_ ): open_list[itera].remove_element(lowerCamelCase_ ) # 
print("s", s) # print("j", j) ((A) , (A)) : Tuple = s A : Any = (x - 1, y) A : Dict = (x + 1, y) A : Union[str, Any] = (x, y + 1) A : Tuple = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowerCamelCase_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowerCamelCase_ ) A : Optional[int] = -1 A : Dict = float('''inf''' ) if valid(lowerCamelCase_ ) and g_function[neighbours] > g_function[s] + 1: A : Optional[int] = g_function[s] + 1 A : Any = s if neighbours not in close_list_anchor: open_list[0].put(lowerCamelCase_ , key(lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ) ) if neighbours not in close_list_inad: for var in range(1 , lowerCamelCase_ ): if key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) <= Wa * key( lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ): open_list[j].put( lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) def snake_case__ ( ): A : Dict = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list lowercase : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} lowercase : Dict = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] lowercase : int = make_common_ground() lowercase : Optional[int] = blocks_blk # hyper parameters lowercase : Dict = 1 lowercase : int = 1 lowercase : str = 20 lowercase : Optional[int] = 3 # one consistent and two other inconsistent # start and end destination lowercase : List[Any] = (0, 0) lowercase : Dict = (n - 1, n - 1) lowercase : Any = 1 def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): A : Any = {start: 0, goal: float('''inf''' )} A : Optional[Any] = {start: -1, goal: -1} A : List[Any] = [] A : str = set() for i in range(lowerCamelCase_ ): open_list.append(PriorityQueue() ) open_list[i].put(lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) A : list[int] = [] A : list[int] = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , lowerCamelCase_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: A , A : Tuple = open_list[i].top_show() visited.add(lowerCamelCase_ ) expand_state( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) close_list_inad.append(lowerCamelCase_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: A : Any = open_list[0].top_show() visited.add(lowerCamelCase_ ) expand_state( lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) close_list_anchor.append(lowerCamelCase_ ) print('''No path found to goal''' 
) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowerCamelCase_ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
423
0
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class lowercase_ : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : int=7 , _UpperCAmelCase : Any=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=99 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : List[str]=37 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Dict=None , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope def lowerCAmelCase_ ( self : Any ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : str ): return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=_UpperCAmelCase , ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ): _A = OpenLlamaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) _A = model(_UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , ): _A = True _A = OpenLlamaModel(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , ) _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , ) _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , ): _A = OpenLlamaForCausalLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , ): _A = True _A = True _A = OpenLlamaForCausalLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() # first forward pass _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase , ) _A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _A = torch.cat([input_ids, next_tokens] , dim=-1 ) _A = torch.cat([input_mask, next_mask] , dim=-1 ) _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0] _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0] # select random slice _A = ids_tensor((1,) , output_from_past.shape[-1] ).item() _A = output_from_no_past[:, -3:, random_slice_idx].detach() _A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) def lowerCAmelCase_ ( self : List[str] ): _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} 
return config, inputs_dict @require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) UpperCAmelCase : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else () UpperCAmelCase : str = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase : Any = False UpperCAmelCase : Any = False def lowerCAmelCase_ ( self : Union[str, Any] ): _A = OpenLlamaModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : List[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _A = type self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = 3 _A = input_dict['input_ids'] _A = input_ids.ne(1 ).to(_UpperCAmelCase ) _A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _A = OpenLlamaForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self : Dict ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = 3 _A = 'single_label_classification' _A = input_dict['input_ids'] _A = input_ids.ne(1 ).to(_UpperCAmelCase ) _A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _A = OpenLlamaForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = 3 _A = 'multi_label_classification' _A = input_dict['input_ids'] _A = input_ids.ne(1 ).to(_UpperCAmelCase ) _A = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _A = OpenLlamaForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def lowerCAmelCase_ ( self : List[str] ): pass @parameterized.expand([('linear',), ('dynamic',)] ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = ids_tensor([1, 10] , config.vocab_size ) _A = ids_tensor([1, 
int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _A = OpenLlamaModel(_UpperCAmelCase ) original_model.to(_UpperCAmelCase ) original_model.eval() _A = original_model(_UpperCAmelCase ).last_hidden_state _A = original_model(_UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _A = {'type': scaling_type, 'factor': 10.0} _A = OpenLlamaModel(_UpperCAmelCase ) scaled_model.to(_UpperCAmelCase ) scaled_model.eval() _A = scaled_model(_UpperCAmelCase ).last_hidden_state _A = scaled_model(_UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
7
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) class _UpperCAmelCase ( lowercase ): def __init__( self : Optional[int] , UpperCAmelCase : Any=-1): # in NER datasets, the last column is usually reserved for NER label SCREAMING_SNAKE_CASE_ :Tuple = label_idx def _snake_case ( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Union[Split, str]): if isinstance(UpperCAmelCase , UpperCAmelCase): SCREAMING_SNAKE_CASE_ :List[Any] = mode.value SCREAMING_SNAKE_CASE_ :Optional[Any] = os.path.join(UpperCAmelCase , F"{mode}.txt") SCREAMING_SNAKE_CASE_ :Tuple = 1 SCREAMING_SNAKE_CASE_ :str = [] with open(UpperCAmelCase , encoding="utf-8") as f: SCREAMING_SNAKE_CASE_ :Tuple = [] SCREAMING_SNAKE_CASE_ :int = [] for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase)) guid_index += 1 SCREAMING_SNAKE_CASE_ :Tuple = [] SCREAMING_SNAKE_CASE_ :Any = [] else: SCREAMING_SNAKE_CASE_ :int = line.split(" ") words.append(splits[0]) if len(UpperCAmelCase) > 1: labels.append(splits[self.label_idx].replace("\n" , "")) else: # Examples could have no label for mode = "test" labels.append("O") if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase)) return examples def _snake_case ( self : List[Any] , UpperCAmelCase : TextIO , UpperCAmelCase : TextIO , UpperCAmelCase : List): SCREAMING_SNAKE_CASE_ :Union[str, Any] = 0 for line in test_input_reader: if line.startswith("-DOCSTART-") or line == "" or line == "\n": writer.write(UpperCAmelCase) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: SCREAMING_SNAKE_CASE_ :Union[str, Any] = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n" writer.write(UpperCAmelCase) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." 
, line.split()[0]) def _snake_case ( self : List[str] , UpperCAmelCase : str): if path: with open(UpperCAmelCase , "r") as f: SCREAMING_SNAKE_CASE_ :Any = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE_ :Union[str, Any] = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _UpperCAmelCase ( lowercase ): def __init__( self : Dict): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2) def _snake_case ( self : Union[str, Any] , UpperCAmelCase : str): if path: with open(UpperCAmelCase , "r") as f: SCREAMING_SNAKE_CASE_ :Optional[int] = f.read().splitlines() if "O" not in labels: SCREAMING_SNAKE_CASE_ :Dict = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _UpperCAmelCase ( lowercase ): def _snake_case ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[Split, str]): if isinstance(UpperCAmelCase , UpperCAmelCase): SCREAMING_SNAKE_CASE_ :List[str] = mode.value SCREAMING_SNAKE_CASE_ :List[str] = os.path.join(UpperCAmelCase , F"{mode}.txt") SCREAMING_SNAKE_CASE_ :Dict = 1 SCREAMING_SNAKE_CASE_ :List[str] = [] with open(UpperCAmelCase , encoding="utf-8") as f: for sentence in parse_incr(UpperCAmelCase): SCREAMING_SNAKE_CASE_ :List[str] = [] SCREAMING_SNAKE_CASE_ :int = [] for token in sentence: words.append(token["form"]) labels.append(token["upos"]) assert len(UpperCAmelCase) == len(UpperCAmelCase) if words: examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=UpperCAmelCase , labels=UpperCAmelCase)) guid_index += 1 return examples def _snake_case ( self : List[Any] , UpperCAmelCase : TextIO , UpperCAmelCase : TextIO , UpperCAmelCase : List): SCREAMING_SNAKE_CASE_ :List[str] = 0 for sentence in parse_incr(UpperCAmelCase): SCREAMING_SNAKE_CASE_ :str = preds_list[example_id] SCREAMING_SNAKE_CASE_ :List[Any] = "" for token in sentence: out += F"{token['form']} ({token['upos']}|{s_p.pop(0)}) " out += "\n" writer.write(UpperCAmelCase) example_id += 1 def _snake_case ( self : Tuple , UpperCAmelCase : str): if path: with open(UpperCAmelCase , "r") as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
631
0
'''simple docstring'''
import re


def indian_phone_validator(phone: str) -> bool:
    """simple docstring"""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    # Search with the compiled pattern (the original searched the phone string
    # against itself, which is a bug) and require a full-string match.
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
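# A few spot-checks for the validator above -- a minimal sketch, not part of the
# original file, assuming only the regex semantics shown (optional +91/0/91 prefix,
# then ten digits starting with 7, 8, or 9):
assert indian_phone_validator("+918827897895") is True  # +91 prefix
assert indian_phone_validator("9876543210") is True  # bare 10-digit number
assert indian_phone_validator("12345") is False  # too short
assert indian_phone_validator("+914427897895") is False  # 4 is not a valid leading digit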
318
'''simple docstring'''
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise the PyTorch model from the JSON config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
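# For context, a hedged example of how this conversion script is typically invoked;
# the checkpoint/config paths below are placeholders, not files shipped with the script:
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin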
318
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase ( a_ , unittest.TestCase ): """simple docstring""" _UpperCamelCase : Any = TextToVideoSDPipeline _UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. _UpperCamelCase : Dict = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def __UpperCAmelCase ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case : Tuple = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) _snake_case : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _snake_case : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , ) _snake_case : Tuple = CLIPTextModel(lowerCamelCase_ ) _snake_case : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _snake_case : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def __UpperCAmelCase ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Any=0 ): '''simple docstring''' if str(lowerCamelCase_ ).startswith('mps' ): _snake_case : Any = torch.manual_seed(lowerCamelCase_ ) else: _snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) _snake_case : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case : List[str] = self.get_dummy_components() _snake_case : Dict = TextToVideoSDPipeline(**lowerCamelCase_ ) _snake_case : Optional[Any] = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _snake_case : Dict = self.get_dummy_inputs(lowerCamelCase_ ) _snake_case : Optional[Any] = 'np' _snake_case : Dict = sd_pipe(**lowerCamelCase_ ).frames 
_snake_case : Optional[int] = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _snake_case : List[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase_ , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase_ , expected_max_diff=1e-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def __UpperCAmelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def __UpperCAmelCase ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def __UpperCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass def __UpperCAmelCase ( self : Dict ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class lowercase ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Any ): '''simple docstring''' _snake_case : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) _snake_case : Dict = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) _snake_case : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : str = pipe.to('cuda' ) _snake_case : str = 'Spiderman is surfing' _snake_case : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) _snake_case : Union[str, Any] = pipe(lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=25 , output_type='pt' ).frames _snake_case : List[Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def __UpperCAmelCase ( self : Tuple ): '''simple docstring''' _snake_case : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) _snake_case : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) _snake_case : Tuple = pipe.to('cuda' ) _snake_case : List[Any] = 'Spiderman is surfing' _snake_case : int = torch.Generator(device='cpu' ).manual_seed(0 ) _snake_case : Any = pipe(lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='pt' ).frames _snake_case : Tuple = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
304
def sum_digits(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Numerators of the convergents of the continued fraction of e:
    # e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
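# Sanity check on the recurrence above, not part of the original file: the continued
# fraction e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] gives 1457/536 as the 10th convergent,
# and 1 + 4 + 5 + 7 = 17 (the worked example from Project Euler problem 65).
assert solution(10) == 17
assert sum_digits(1457) == 17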
304
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __snake_case : List[str] = { 'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'], 'processing_layoutlmv2': ['LayoutLMv2Processor'], 'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : str = ['LayoutLMv2TokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : int = ['LayoutLMv2FeatureExtractor'] __snake_case : Optional[Any] = ['LayoutLMv2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = [ 'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv2ForQuestionAnswering', 'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Layer', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
615
"""simple docstring""" import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __snake_case : Tuple = logging.get_logger(__name__) def _lowercase ( __snake_case ,__snake_case ) -> Dict: __lowerCAmelCase : Optional[Any] = set() __lowerCAmelCase : List[Any] = [] def parse_line(__snake_case ): for line in fp: if isinstance(__snake_case ,__snake_case ): __lowerCAmelCase : Tuple = line.decode("UTF-8" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" " ): # process a single warning and move it to `selected_warnings`. if len(__snake_case ) > 0: __lowerCAmelCase : List[Any] = "\n".join(__snake_case ) # Only keep the warnings specified in `targets` if any(F""": {x}: """ in warning for x in targets ): selected_warnings.add(__snake_case ) buffer.clear() continue else: __lowerCAmelCase : List[str] = line.strip() buffer.append(__snake_case ) if from_gh: for filename in os.listdir(__snake_case ): __lowerCAmelCase : List[str] = os.path.join(__snake_case ,__snake_case ) if not os.path.isdir(__snake_case ): # read the file if filename != "warnings.txt": continue with open(__snake_case ) as fp: parse_line(__snake_case ) else: try: with zipfile.ZipFile(__snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(__snake_case ): # read the file if filename != "warnings.txt": continue with z.open(__snake_case ) as fp: parse_line(__snake_case ) except Exception: logger.warning( F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def _lowercase ( __snake_case ,__snake_case ) -> Any: __lowerCAmelCase : Any = set() __lowerCAmelCase : str = [os.path.join(__snake_case ,__snake_case ) for p in os.listdir(__snake_case ) if (p.endswith(".zip" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(__snake_case ,__snake_case ) ) return selected_warnings if __name__ == "__main__": def _lowercase ( __snake_case ) -> Optional[Any]: return values.split("," ) __snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') # optional parameters parser.add_argument( '--targets', default='DeprecationWarning,UserWarning,FutureWarning', type=list_str, help='Comma-separated list of target warning(s) which we want to extract.', ) parser.add_argument( '--from_gh', action='store_true', help='If running from a GitHub action workflow and collecting warnings from its artifacts.', ) __snake_case : Tuple = parser.parse_args() __snake_case : Any = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __snake_case : str = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('=' * 80) 
download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __snake_case : Tuple = extract_warnings(args.output_dir, args.targets) __snake_case : List[str] = sorted(selected_warnings) with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
615
1
'''simple docstring''' import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class _a : '''simple docstring''' def __init__( self ,__a ,__a=13 ,__a=7 ,__a=6 ,__a=17 ,__a=23 ,__a=11 ,__a=True ,) -> List[Any]: snake_case : List[str] = parent snake_case : Union[str, Any] = batch_size snake_case : List[str] = seq_length snake_case : List[Any] = act_dim snake_case : Optional[int] = state_dim snake_case : Union[str, Any] = hidden_size snake_case : Any = max_length snake_case : Any = is_training def snake_case_ ( self ) -> Union[str, Any]: snake_case : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) snake_case : Tuple = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) snake_case : Any = floats_tensor((self.batch_size, self.seq_length, 1) ) snake_case : List[Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) snake_case : str = ids_tensor((self.batch_size, self.seq_length) ,vocab_size=1_000 ) snake_case : Any = random_attention_mask((self.batch_size, self.seq_length) ) snake_case : int = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def snake_case_ ( self ) -> Optional[int]: return DecisionTransformerConfig( batch_size=self.batch_size ,seq_length=self.seq_length ,act_dim=self.act_dim ,state_dim=self.state_dim ,hidden_size=self.hidden_size ,max_length=self.max_length ,) def snake_case_ ( self ,__a ,__a ,__a ,__a ,__a ,__a ,__a ,) -> Dict: snake_case : Any = DecisionTransformerModel(config=__a ) model.to(__a ) model.eval() snake_case : Tuple = model(__a ,__a ,__a ,__a ,__a ,__a ) self.parent.assertEqual(result.state_preds.shape ,states.shape ) self.parent.assertEqual(result.action_preds.shape ,actions.shape ) self.parent.assertEqual(result.return_preds.shape ,returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def snake_case_ ( self ) -> int: snake_case : Optional[Any] = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) : int = config_and_inputs snake_case : int = { """states""": states, """actions""": actions, """rewards""": rewards, """returns_to_go""": returns_to_go, """timesteps""": timesteps, """attention_mask""": attention_mask, } return config, inputs_dict @require_torch class _a (a__, a__, a__, unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Optional[int] = (DecisionTransformerModel,) if is_torch_available() else () lowerCAmelCase_ : Tuple = () lowerCAmelCase_ : str = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids lowerCAmelCase_ : List[str] = False # Ignoring of a 
failing tests from ModelTesterMixin, as the model does not implement these features lowerCAmelCase_ : Dict = False lowerCAmelCase_ : int = False lowerCAmelCase_ : Optional[int] = False lowerCAmelCase_ : List[Any] = False lowerCAmelCase_ : List[Any] = False lowerCAmelCase_ : Any = False lowerCAmelCase_ : Optional[Any] = False lowerCAmelCase_ : Optional[int] = False lowerCAmelCase_ : Any = False def snake_case_ ( self ) -> List[Any]: snake_case : int = DecisionTransformerModelTester(self ) snake_case : int = ConfigTester(self ,config_class=__a ,hidden_size=37 ) def snake_case_ ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def snake_case_ ( self ) -> int: snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) @slow def snake_case_ ( self ) -> List[Any]: for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case : Union[str, Any] = DecisionTransformerModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case_ ( self ) -> Optional[Any]: snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : Optional[int] = model_class(__a ) snake_case : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case : Union[str, Any] = [*signature.parameters.keys()] snake_case : Optional[Any] = [ """states""", """actions""", """rewards""", """returns_to_go""", """timesteps""", """attention_mask""", ] self.assertListEqual(arg_names[: len(__a )] ,__a ) @require_torch class _a (unittest.TestCase ): '''simple docstring''' @slow def snake_case_ ( self ) -> List[Any]: snake_case : Optional[int] = 2 # number of steps of autoregressive prediction we will perform snake_case : Union[str, Any] = 10 # defined by the RL environment, may be normalized snake_case : Tuple = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" ) snake_case : List[Any] = model.to(__a ) snake_case : int = model.config torch.manual_seed(0 ) snake_case : Optional[int] = torch.randn(1 ,1 ,config.state_dim ).to(device=__a ,dtype=torch.floataa ) # env.reset() snake_case : List[str] = torch.tensor( [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] ,device=__a ) snake_case : Optional[Any] = torch.tensor(__a ,device=__a ,dtype=torch.floataa ).reshape(1 ,1 ,1 ) snake_case : Optional[int] = state snake_case : List[Any] = torch.zeros(1 ,0 ,config.act_dim ,device=__a ,dtype=torch.floataa ) snake_case : Tuple = torch.zeros(1 ,0 ,device=__a ,dtype=torch.floataa ) snake_case : Tuple = torch.tensor(0 ,device=__a ,dtype=torch.long ).reshape(1 ,1 ) for step in range(__a ): snake_case : str = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=__a )] ,dim=1 ) snake_case : Tuple = torch.cat([rewards, torch.zeros(1 ,1 ,device=__a )] ,dim=1 ) snake_case : List[str] = torch.ones(1 ,states.shape[1] ).to(dtype=torch.long ,device=states.device ) with torch.no_grad(): snake_case , snake_case , snake_case : List[Any] = model( states=__a ,actions=__a ,rewards=__a ,returns_to_go=__a ,timesteps=__a ,attention_mask=__a ,return_dict=__a ,) self.assertEqual(action_pred.shape ,actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1E-4 ) ) snake_case , snake_case , snake_case , snake_case : str = ( # env.step(action) torch.randn(1 ,1 ,config.state_dim ).to(device=__a 
,dtype=torch.floataa ), 1.0, False, {}, ) snake_case : Dict = action_pred[0, -1] snake_case : Optional[int] = torch.cat([states, state] ,dim=1 ) snake_case : Tuple = returns_to_go[0, -1] - reward snake_case : Tuple = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1 )] ,dim=1 ) snake_case : List[str] = torch.cat( [timesteps, torch.ones((1, 1) ,device=__a ,dtype=torch.long ) * (step + 1)] ,dim=1 )
116
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
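# For reference, a minimal usage sketch of the FileLock API these tests exercise;
# the lock-file path is a placeholder, not one used by the tests:
from datasets.utils.filelock import FileLock

with FileLock("/tmp/my_resource.lock"):  # blocks until the lock can be acquired
    ...  # work that must not run concurrently
# the lock is released on exit from the with-block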
116
1
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """simple docstring"""
    # Exactly one of the three quantities must be 0 (the unknown to solve for)
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
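# A quick worked use of the solver above (a sketch; the numbers are illustrative,
# not measured values). Pass 0 for the single unknown quantity:
name, value = carrier_concentration(conductivity=25.0, electron_conc=1e19, mobility=0)
print(name, value)  # "mobility", 25.0 / (1e19 * 1.6021e-19) ~= 15.6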
127
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging lowercase_: Union[str, Any] = logging.get_logger(__name__) def _lowercase ( UpperCAmelCase_): """simple docstring""" if isinstance(UpperCAmelCase_ , np.ndarray): return list(tensor.shape) snake_case__ : List[Any] = tf.shape(UpperCAmelCase_) if tensor.shape == tf.TensorShape(UpperCAmelCase_): return dynamic snake_case__ : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase_)] def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None): """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCAmelCase_ , name=UpperCAmelCase_) def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=1e-5 , UpperCAmelCase_=-1): """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""") # Get mean and variance on the axis to be normalized snake_case__ , snake_case__ : Any = tf.nn.moments(UpperCAmelCase_ , axes=[axis] , keepdims=UpperCAmelCase_) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis snake_case__ : Optional[Any] = [1] * inputs.shape.rank snake_case__ : Optional[int] = shape_list(UpperCAmelCase_)[axis] snake_case__ : Tuple = tf.reshape(UpperCAmelCase_ , UpperCAmelCase_) snake_case__ : Tuple = tf.reshape(UpperCAmelCase_ , UpperCAmelCase_) # Compute layer normalization using the batch_normalization # function. snake_case__ : List[str] = tf.nn.batch_normalization( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , offset=UpperCAmelCase_ , scale=UpperCAmelCase_ , variance_epsilon=UpperCAmelCase_ , ) return outputs def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_=0 , UpperCAmelCase_=-1): """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input snake_case__ : Optional[int] = tf.shape(UpperCAmelCase_) snake_case__ : List[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1]) snake_case__ : Optional[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0) return tf.reshape(UpperCAmelCase_ , UpperCAmelCase_) def _lowercase ( UpperCAmelCase_): """simple docstring""" if not isinstance(UpperCAmelCase_ , tf.Tensor): snake_case__ : int = tf.convert_to_tensor(UpperCAmelCase_) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: snake_case__ : Dict = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: snake_case__ : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
    # https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert that all token ids in `tensor` are valid indices into an embedding layer of size `embed_dim`."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes (data) of the specified name into the HDF5 group, chunking when needed."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attributes of the specified name from the HDF5 group, transparently de-chunking."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand 1-dimensional Tensors into 2-dimensional Tensors."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
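# Editor-added sketch (not part of the original module): in eager mode,
# `flatten` mirrors torch.flatten and `shape_list` resolves static dims.
# x = tf.zeros((2, 3, 4))
# shape_list(x)                        # -> [2, 3, 4]
# shape_list(flatten(x, start_dim=1))  # -> [2, 12]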
"""simple docstring""" def lowercase__ ( lowerCamelCase = 100 ): _SCREAMING_SNAKE_CASE : Any = (n * (n + 1) // 2) ** 2 _SCREAMING_SNAKE_CASE : List[Any] = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCamelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCamelCase : Optional[Any] = ''' Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)["depth"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline("depth-estimation") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to("cuda") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> img = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") >>> prompt = "A robot, 4k photo" >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" >>> generator = torch.Generator(device="cuda").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save("robot_cat.png") ``` ''' def lowercase ( lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str=8): """simple docstring""" _A : List[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _A : Tuple = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowerCamelCase__ ( snake_case_ ): """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) -> List[Any]: super().__init__() self.register_modules( unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , ) _A : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: if latents is None: _A : Optional[int] = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) _A : Tuple = latents.to(UpperCAmelCase__ ) _A : Tuple = latents * scheduler.init_noise_sigma return latents def _lowerCamelCase ( self , UpperCAmelCase__=0 ) -> Dict: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) _A : int = torch.device(F"""cuda:{gpu_id}""" ) _A : Optional[int] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowerCamelCase ( self , UpperCAmelCase__=0 ) -> Optional[Any]: if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) _A : Any = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _A : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: _A , _A : str = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ ) # We'll offload the last model manually. 
_A : Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowerCamelCase ( self ) -> Any: if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCAmelCase__ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCAmelCase__ ) def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 5_1_2 , UpperCAmelCase__ = 5_1_2 , UpperCAmelCase__ = 1_0_0 , UpperCAmelCase__ = 4.0 , UpperCAmelCase__ = 1 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = "pil" , UpperCAmelCase__ = True , ) -> Optional[Any]: _A : Optional[int] = self._execution_device _A : str = guidance_scale > 1.0 if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _A : Optional[Any] = torch.cat(UpperCAmelCase__ , dim=0 ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _A : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _A : List[Any] = torch.cat(UpperCAmelCase__ , dim=0 ) _A : Tuple = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: _A : str = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 ) _A : Any = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 ) _A : Any = hint.repeat_interleave(UpperCAmelCase__ , dim=0 ) _A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ ) _A : Any = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ ) self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ ) _A : Tuple = self.scheduler.timesteps _A : Union[str, Any] = self.movq.config.latent_channels _A , _A : List[str] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor ) # create initial latent _A : List[Any] = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ): # expand the latents if we are doing classifier free guidance _A : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A : Tuple = {'''image_embeds''': image_embeds, '''hint''': hint} _A : List[Any] = self.unet( sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0] if do_classifier_free_guidance: _A , _A : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) _A , _A : Union[str, Any] = noise_pred.chunk(2 ) _A , _A : Optional[Any] = variance_pred.chunk(2 ) _A : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _A : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _A , _A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _A : Optional[Any] = self.scheduler.step( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0] # 
post-processing _A : Tuple = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: _A : Union[str, Any] = image * 0.5 + 0.5 _A : Tuple = image.clamp(0 , 1 ) _A : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _A : Tuple = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
"""
Scrape worldwide COVID-19 totals from worldometers.info.
"""

from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
"""simple docstring""" a__ : int = 8.314462 # Unit - J mol-1 K-1 def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): """simple docstring""" if moles < 0 or kelvin < 0 or volume < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): """simple docstring""" if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() a__ : int = logging.get_logger() @dataclass class __magic_name__ : UpperCamelCase : nn.Module UpperCamelCase : List[nn.Module] = field(default_factory=_UpperCamelCase ) UpperCamelCase : list = field(default_factory=_UpperCamelCase ) def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ): """simple docstring""" _lowerCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(__magic_name__ , nn.Convad ) or isinstance(__magic_name__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(__magic_name__ ) def __call__( self , __magic_name__ ): """simple docstring""" for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(__magic_name__ ) [x.remove() for x in self.handles] return self @property def _lowerCamelCase ( self ): """simple docstring""" return list(filter(lambda __magic_name__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __magic_name__ : UpperCamelCase : nn.Module UpperCamelCase : nn.Module UpperCamelCase : int = 1 UpperCamelCase : List = field(default_factory=_UpperCamelCase ) UpperCamelCase : List = field(default_factory=_UpperCamelCase ) UpperCamelCase : bool = True def __call__( self , __magic_name__ ): """simple docstring""" _lowerCAmelCase = Tracker(self.dest )(__magic_name__ ).parametrized _lowerCAmelCase = Tracker(self.src )(__magic_name__ ).parametrized _lowerCAmelCase = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.src_skip , __magic_name__ ) ) _lowerCAmelCase = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.dest_skip , __magic_name__ ) ) if len(__magic_name__ ) != len(__magic_name__ ) and self.raise_if_mismatch: raise Exception( F'''Numbers of operations are different. 
Source module has {len(__magic_name__ )} operations while''' F''' destination module has {len(__magic_name__ )}.''' ) for dest_m, src_m in zip(__magic_name__ , __magic_name__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) class __magic_name__ ( nn.Module ): def __init__( self , __magic_name__ ): """simple docstring""" super().__init__() _lowerCAmelCase = [] # - get the stem feature_blocks.append(('conv1', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('block' ), F'''Unexpected layer name {k}''' _lowerCAmelCase = len(__magic_name__ ) + 1 feature_blocks.append((F'''res{block_index}''', v) ) _lowerCAmelCase = nn.ModuleDict(__magic_name__ ) def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" return get_trunk_forward_outputs( __magic_name__ , out_feat_keys=__magic_name__ , feature_blocks=self._feature_blocks , ) class __magic_name__ ( _UpperCamelCase ): def _lowerCamelCase ( self , __magic_name__ ): """simple docstring""" _lowerCAmelCase = x.split('-' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self , __magic_name__ ): """simple docstring""" if x not in self: _lowerCAmelCase = self.convert_name_to_timm(__magic_name__ ) _lowerCAmelCase = partial(lambda: (timm.create_model(__magic_name__ , pretrained=__magic_name__ ).eval(), None) ) else: _lowerCAmelCase = super().__getitem__(__magic_name__ ) return val class __magic_name__ ( _UpperCamelCase ): def __getitem__( self , __magic_name__ ): """simple docstring""" if "seer" in x and "in1k" not in x: _lowerCAmelCase = RegNetModel else: _lowerCAmelCase = RegNetForImageClassification return val def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): """simple docstring""" for from_key, to_key in keys: _lowerCAmelCase = from_state_dict[from_key].clone() print(F'''Copied key={from_key} to={to_key}''' ) return to_state_dict def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = True, ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): _lowerCAmelCase , _lowerCAmelCase = from_model_func() _lowerCAmelCase = our_model_func(__lowerCamelCase ).eval() _lowerCAmelCase = ModuleTransfer(src=__lowerCamelCase, dest=__lowerCamelCase, raise_if_mismatch=__lowerCamelCase ) _lowerCAmelCase = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(__lowerCamelCase ) if from_state_dict is not None: _lowerCAmelCase = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: _lowerCAmelCase = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] _lowerCAmelCase = manually_copy_vissl_head(__lowerCamelCase, our_model.state_dict(), __lowerCamelCase ) our_model.load_state_dict(__lowerCamelCase ) _lowerCAmelCase = our_model(__lowerCamelCase, output_hidden_states=__lowerCamelCase ) _lowerCAmelCase = ( our_outputs.logits if isinstance(__lowerCamelCase, __lowerCamelCase ) else our_outputs.last_hidden_state ) _lowerCAmelCase = from_model(__lowerCamelCase ) _lowerCAmelCase = from_output[-1] if type(__lowerCamelCase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: _lowerCAmelCase = our_outputs.hidden_states[-1] assert torch.allclose(__lowerCamelCase, __lowerCamelCase ), 
"The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=__lowerCamelCase, ) _lowerCAmelCase = 2_2_4 if 'seer' not in name else 3_8_4 # we can use the convnext one _lowerCAmelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=__lowerCamelCase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=__lowerCamelCase, ) print(F'''Pushed {name}''' ) def A__ ( __lowerCamelCase, __lowerCamelCase = None, __lowerCamelCase = True ): """simple docstring""" _lowerCAmelCase = 'imagenet-1k-id2label.json' _lowerCAmelCase = 1_0_0_0 _lowerCAmelCase = (1, num_labels) _lowerCAmelCase = 'huggingface/label-files' _lowerCAmelCase = num_labels _lowerCAmelCase = json.load(open(cached_download(hf_hub_url(__lowerCamelCase, __lowerCamelCase, repo_type='dataset' ) ), 'r' ) ) _lowerCAmelCase = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} _lowerCAmelCase = partial(__lowerCamelCase, num_labels=__lowerCamelCase, idalabel=__lowerCamelCase, labelaid=__lowerCamelCase ) _lowerCAmelCase = { 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7], hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8], groups_width=8, layer_type='x' ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 1_2], hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4], groups_width=1_6, layer_type='x' ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7], hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8], groups_width=2_4, layer_type='x' ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5], hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2], groups_width=1_6, layer_type='x' ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 1_0, 2], hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2], groups_width=2_4, layer_type='x' ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 1_5, 2], hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8], groups_width=4_8, layer_type='x' ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 1_4, 2], hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0], groups_width=4_0, layer_type='x' ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 1_0, 1], hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4], groups_width=5_6, layer_type='x' ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 1_5, 1], hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0], groups_width=1_2_0, layer_type='x' ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 1_1, 1], hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0], groups_width=1_1_2, layer_type='x' ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 1_3, 1], hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8], groups_width=1_2_8, layer_type='x' ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 1_3, 1], hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0], groups_width=1_6_8, layer_type='x' ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8], groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6], hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0], groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4], hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8], groups_width=1_6 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2], hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8], groups_width=1_6 ), 'regnet-y-016': ImageNetPreTrainedConfig( depths=[2, 6, 
1_7, 2], hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8], groups_width=2_4 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 1_3, 1], hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2], groups_width=2_4 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 1_2, 2], hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8], groups_width=6_4 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 1_4, 2], hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6], groups_width=7_2 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 1_0, 1], hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6], groups_width=5_6 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 1_1, 1], hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0], groups_width=1_1_2 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 1_1, 1], hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4], groups_width=1_1_2 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 1_2, 1], hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2], groups_width=2_3_2 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 1_2, 1], hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2], groups_width=2_3_2 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 1_2, 1], hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0], groups_width=3_2_8 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 1_7, 1], hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2], groups_width=2_6_4 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 1_6, 1], hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8], groups_width=6_4_0 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 1_7, 1], hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0], groups_width=1_0_1_0 ), # finetuned on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 1_2, 1], hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2], groups_width=2_3_2 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 1_2, 1], hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0], groups_width=3_2_8 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 1_7, 1], hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2], groups_width=2_6_4 ), 'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 1_6, 1], hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8], groups_width=6_4_0 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 1_7, 1], hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0], groups_width=1_0_1_0 ), } _lowerCAmelCase = NameToOurModelFuncMap() _lowerCAmelCase = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(__lowerCamelCase, __lowerCamelCase ) -> Tuple[nn.Module, Dict]: _lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowerCamelCase, model_dir=str(__lowerCamelCase ), map_location='cpu' ) _lowerCAmelCase = model_func() # check if we have a head, if yes add it _lowerCAmelCase = files['classy_state_dict']['base_model']['model'] _lowerCAmelCase = model_state_dict['trunk'] model.load_state_dict(__lowerCamelCase ) return model.eval(), model_state_dict["heads"] # pretrained _lowerCAmelCase = partial( __lowerCamelCase, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _lowerCAmelCase = partial( __lowerCamelCase, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _lowerCAmelCase = partial( __lowerCamelCase, 
'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) _lowerCAmelCase = partial( __lowerCamelCase, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=2_7, group_width=1_0_1_0, w_a=1_7_4_4, w_a=620.83, w_m=2.52 ) ) ), ) # IN1K finetuned _lowerCAmelCase = partial( __lowerCamelCase, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _lowerCAmelCase = partial( __lowerCamelCase, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _lowerCAmelCase = partial( __lowerCamelCase, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) _lowerCAmelCase = partial( __lowerCamelCase, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=2_7, group_width=1_0_1_0, w_a=1_7_4_4, w_a=620.83, w_m=2.52 ) ) ), ) if model_name: convert_weight_and_push( __lowerCamelCase, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], __lowerCamelCase, __lowerCamelCase, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( __lowerCamelCase, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ) return config, expected_shape if __name__ == "__main__": a__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported regnet* architecture,""" """ currently: regnetx-*, regnety-*. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) a__ : Optional[int] = parser.parse_args() a__ : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""
Parse raw DPR training data into an evaluation set and a gold data file.
"""

import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
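# Editor-added note (input shape inferred from the parsing logic above, not
# from DPR documentation): each record in --src_path must at least contain
#   {"question": "...", "positive_ctxs": [{"title": "..."}, ...]}
# The script writes one question per line to --evaluation_set and the
# tab-joined positive-context titles to --gold_data_path.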
"""
Binary search variants: insertion points, stdlib-based lookup, and iterative
and recursive searches over a sorted collection.
"""

from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost insertion point for `item` that keeps `sorted_collection` sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost insertion point for `item` that keeps `sorted_collection` sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search; returns the index of `item` or None."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
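# Editor-added examples (illustrative):
# bisect_left([0, 5, 7, 10, 15], 6)     # -> 2 (insertion point before 7)
# bisect_right([0, 5, 7, 10, 15], 7)    # -> 3 (insertion point after the 7)
# binary_search([0, 5, 7, 10, 15], 10)  # -> 3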
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy A__ : List[Any] = logging.getLogger(__name__) def UpperCamelCase( __UpperCamelCase : torch.nn.Module ,__UpperCamelCase : BnbQuantizationConfig ,__UpperCamelCase : Union[str, os.PathLike] = None ,__UpperCamelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None ,__UpperCamelCase : Optional[List[str]] = None ,__UpperCamelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None ,__UpperCamelCase : Optional[Union[str, os.PathLike]] = None ,__UpperCamelCase : bool = False ,): lowerCAmelCase_ : Dict = bnb_quantization_config.load_in_abit lowerCAmelCase_ : int = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowerCAmelCase_ : Optional[int] = [] # custom device map if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(device_map.keys() ) > 1: lowerCAmelCase_ : Dict = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCAmelCase_ : List[Any] = get_keys_to_not_convert(__UpperCamelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__UpperCamelCase ) lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCAmelCase_ : int = [] lowerCAmelCase_ : Any = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__UpperCamelCase ) # compatibility with peft lowerCAmelCase_ : List[str] = load_in_abit lowerCAmelCase_ : Tuple = load_in_abit lowerCAmelCase_ : str = get_parameter_device(__UpperCamelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. 
''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(__UpperCamelCase ,__UpperCamelCase ,modules_to_not_convert=__UpperCamelCase ) # convert param to the right dtype lowerCAmelCase_ : Optional[int] = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCAmelCase_ : List[str] = name.replace('''.weight''' ,'''''' ).replace('''.bias''' ,'''''' ) lowerCAmelCase_ : Dict = getattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__UpperCamelCase ): param.to(__UpperCamelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCAmelCase_ : Optional[Any] = replace_with_bnb_layers( __UpperCamelCase ,__UpperCamelCase ,modules_to_not_convert=__UpperCamelCase ) lowerCAmelCase_ : int = get_quantized_model_device_map( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,max_memory=__UpperCamelCase ,no_split_module_classes=__UpperCamelCase ,) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCAmelCase_ : Optional[Any] = True lowerCAmelCase_ : int = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=__UpperCamelCase ,offload_state_dict=__UpperCamelCase ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,) return dispatch_model(__UpperCamelCase ,device_map=__UpperCamelCase ,offload_dir=__UpperCamelCase ) def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Dict=None ,__UpperCamelCase : str=None ): if device_map is None: if torch.cuda.is_available(): lowerCAmelCase_ : str = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. 
A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowerCAmelCase_ : List[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCAmelCase_ : Union[str, Any] = {} lowerCAmelCase_ : Any = special_dtypes lowerCAmelCase_ : Tuple = no_split_module_classes lowerCAmelCase_ : Optional[int] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCAmelCase_ : Tuple = get_balanced_memory( __UpperCamelCase ,low_zero=(device_map == '''balanced_low_0''') ,max_memory=__UpperCamelCase ,**__UpperCamelCase ,) lowerCAmelCase_ : Tuple = max_memory lowerCAmelCase_ : Dict = infer_auto_device_map(__UpperCamelCase ,**__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): # check if don't have any quantized module on the cpu lowerCAmelCase_ : int = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCAmelCase_ : Tuple = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : int=None ,__UpperCamelCase : List[str]=None ): if modules_to_not_convert is None: lowerCAmelCase_ : Any = [] lowerCAmelCase_ , lowerCAmelCase_ : Dict = _replace_with_bnb_layers( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : str=None ,__UpperCamelCase : Dict=None ,): lowerCAmelCase_ : List[Any] = False for name, module in model.named_children(): if current_key_name is None: lowerCAmelCase_ : str = [] current_key_name.append(__UpperCamelCase ) if isinstance(__UpperCamelCase ,nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCAmelCase_ : Optional[Any] = '''.'''.join(__UpperCamelCase ) lowerCAmelCase_ : Union[str, Any] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCAmelCase_ : Any = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCAmelCase_ : Optional[Any] = bnb.nn.LinearabitLt( module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=__UpperCamelCase ,threshold=bnb_quantization_config.llm_inta_threshold ,) elif bnb_quantization_config.load_in_abit: lowerCAmelCase_ : Optional[Any] = bnb.nn.Linearabit( module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowerCAmelCase_ : List[str] = module.weight.data if module.bias is not None: lowerCAmelCase_ : List[str] = module.bias.data bnb_module.requires_grad_(__UpperCamelCase ) setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowerCAmelCase_ : Optional[Any] = True if len(list(module.children() ) ) > 0: lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = _replace_with_bnb_layers( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowerCAmelCase_ : List[str] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def UpperCamelCase( __UpperCamelCase : List[Any] ): # Create a copy of the model with init_empty_weights(): lowerCAmelCase_ : Optional[Any] = deepcopy(__UpperCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCAmelCase_ : List[str] = find_tied_parameters(__UpperCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__UpperCamelCase ,__UpperCamelCase ): lowerCAmelCase_ : Union[str, Any] = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: lowerCAmelCase_ : List[Any] = sum(__UpperCamelCase ,[] ) 
lowerCAmelCase_ : Tuple = len(__UpperCamelCase ) > 0 # Check if it is a base model lowerCAmelCase_ : Optional[Any] = False if hasattr(__UpperCamelCase ,'''base_model_prefix''' ): lowerCAmelCase_ : List[Any] = not hasattr(__UpperCamelCase ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCAmelCase_ : List[str] = list(model.named_children() ) lowerCAmelCase_ : Any = [list_modules[-1][0]] # add last module together with tied weights lowerCAmelCase_ : Tuple = set(__UpperCamelCase ) - set(__UpperCamelCase ) lowerCAmelCase_ : List[str] = list(set(__UpperCamelCase ) ) + list(__UpperCamelCase ) # remove ".weight" from the keys lowerCAmelCase_ : str = ['''.weight''', '''.bias'''] lowerCAmelCase_ : str = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCAmelCase_ : List[str] = name.replace(__UpperCamelCase ,'''''' ) filtered_module_names.append(__UpperCamelCase ) return filtered_module_names def UpperCamelCase( __UpperCamelCase : Optional[int] ): for m in model.modules(): if isinstance(__UpperCamelCase ,bnb.nn.Linearabit ): return True return False def UpperCamelCase( __UpperCamelCase : nn.Module ): return next(parameter.parameters() ).device def UpperCamelCase( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(__UpperCamelCase ,__UpperCamelCase ,0 ,dtype=__UpperCamelCase ,value=__UpperCamelCase ) lowerCAmelCase_ : Tuple = param_name lowerCAmelCase_ : Optional[int] = model if "." in tensor_name: lowerCAmelCase_ : Dict = tensor_name.split('''.''' ) for split in splits[:-1]: lowerCAmelCase_ : str = getattr(__UpperCamelCase ,__UpperCamelCase ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) lowerCAmelCase_ : List[Any] = new_module lowerCAmelCase_ : Tuple = splits[-1] # offload weights lowerCAmelCase_ : Dict = False offload_weight(module._parameters[tensor_name] ,__UpperCamelCase ,__UpperCamelCase ,index=__UpperCamelCase ) if hasattr(module._parameters[tensor_name] ,'''SCB''' ): offload_weight( module._parameters[tensor_name].SCB ,param_name.replace('''weight''' ,'''SCB''' ) ,__UpperCamelCase ,index=__UpperCamelCase ,) else: offload_weight(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,index=__UpperCamelCase ) offload_weight(__UpperCamelCase ,param_name.replace('''weight''' ,'''SCB''' ) ,__UpperCamelCase ,index=__UpperCamelCase ) set_module_tensor_to_device(__UpperCamelCase ,__UpperCamelCase ,'''meta''' ,dtype=__UpperCamelCase ,value=torch.empty(*param.size() ) )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
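# Editor-added usage sketch (assumes the transformers tools runtime; the file
# name below is hypothetical):
# from PIL import Image
# tool = ImageCaptioningTool()
# caption = tool(Image.open("photo.png"))  # -> an English description of the image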
def factorial(num: int) -> int:
    """Find the factorial of num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the digits of number."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
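# Editor-added sanity check (illustrative): 10! = 3628800 and its digits sum
# to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.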
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model doc ToC and sort models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
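# Editor-added usage note: invoke the script with --fix_and_overwrite to
# rewrite docs/source/en/_toctree.yml in place instead of raising when the
# model ToC is unsorted or contains duplicates.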
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _a : Optional[Any] = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str] = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[str] = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Union[str, Any] = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : List[Any] = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Optional[Any] = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, 
TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys _a : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
598
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the value at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos
            temp = self.get_position(positions[smallest_child])
            self.set_position(positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Sift a decreased key up towards the root.
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
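# Usage sketch (not part of the original file): the same MST routine driven by a
# hand-built graph instead of stdin. The edge weights below are illustrative only.
example_graph = defaultdict(list)
for u, v, w in [(0, 1, 4), (0, 2, 1), (1, 2, 2), (2, 3, 7)]:
    example_graph[u].append([v, w])
    example_graph[v].append([u, w])
# Each tree edge is (parent_in_tree, vertex); here: [(0, 2), (2, 1), (2, 3)]
print(prisms_algorithm(example_graph))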
497
0
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Tuple = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] SCREAMING_SNAKE_CASE__:int = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def _lowerCamelCase( a ): __a = torch.load(a , map_location="cpu" ) return sd def _lowerCamelCase( a , a , a=rename_keys_prefix ): __a = OrderedDict() __a = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __a = key for name_pair in rename_keys_prefix: __a = new_key.replace(name_pair[0] , name_pair[1] ) __a = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __a = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def _lowerCamelCase( a , a ): assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." # Get Config if "pre" in checkpoint_path: __a = "pretraining" if "vcr" in checkpoint_path: __a = {"visual_embedding_dim": 5_1_2} elif "vqa_advanced" in checkpoint_path: __a = {"visual_embedding_dim": 2_0_4_8} elif "vqa" in checkpoint_path: __a = {"visual_embedding_dim": 2_0_4_8} elif "nlvr" in checkpoint_path: __a = {"visual_embedding_dim": 1_0_2_4} else: raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." ) else: if "vcr" in checkpoint_path: __a = {"visual_embedding_dim": 5_1_2} __a = "multichoice" elif "vqa_advanced" in checkpoint_path: __a = {"visual_embedding_dim": 2_0_4_8} __a = "vqa_advanced" elif "vqa" in checkpoint_path: __a = {"visual_embedding_dim": 2_0_4_8, "num_labels": 3_1_2_9} __a = "vqa" elif "nlvr" in checkpoint_path: __a = { "visual_embedding_dim": 1_0_2_4, "num_labels": 2, } __a = "nlvr" __a = VisualBertConfig(**a ) # Load State Dict __a = load_state_dict(a ) __a = get_new_dict(a , a ) if model_type == "pretraining": __a = VisualBertForPreTraining(a ) elif model_type == "vqa": __a = VisualBertForQuestionAnswering(a ) elif model_type == "nlvr": __a = VisualBertForVisualReasoning(a ) elif model_type == "multichoice": __a = VisualBertForMultipleChoice(a ) model.load_state_dict(a ) # Save Checkpoints Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Any = argparse.ArgumentParser() # Required parameters parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""") SCREAMING_SNAKE_CASE__:Optional[int] = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
704
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def _lowerCamelCase( a , a , a ): __a = OmegaConf.load(a ) __a = torch.load(a , map_location="cpu" )["model"] __a = list(state_dict.keys() ) # extract state_dict for VQVAE __a = {} __a = "first_stage_model." for key in keys: if key.startswith(a ): __a = state_dict[key] # extract state_dict for UNetLDM __a = {} __a = "model.diffusion_model." for key in keys: if key.startswith(a ): __a = state_dict[key] __a = config.model.params.first_stage_config.params __a = config.model.params.unet_config.params __a = VQModel(**a ).eval() vqvae.load_state_dict(a ) __a = UNetLDMModel(**a ).eval() unet.load_state_dict(a ) __a = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=a , ) __a = LDMPipeline(a , a , a ) pipeline.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:List[Any] = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", type=str, required=True) parser.add_argument("""--config_path""", type=str, required=True) parser.add_argument("""--output_path""", type=str, required=True) SCREAMING_SNAKE_CASE__:Union[str, Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
67
0
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _lowerCAmelCase ( unittest.TestCase , SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = load_tool("""text-classification""" ) self.tool.setup() snake_case_ : Union[str, Any] = load_tool("""text-classification""" , remote=_lowercase ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' snake_case_ : List[str] = self.tool("""That's quite cool""" , ["""positive""", """negative"""] ) self.assertEqual(_lowercase , """positive""" ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ : Tuple = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] ) self.assertEqual(_lowercase , """positive""" ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] ) self.assertEqual(_lowercase , """positive""" ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ : Any = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] ) self.assertEqual(_lowercase , """positive""" )
58
def combination_sum_iv(n, array, target):
    # Plain recursion: counts ordered sequences drawn from `array` summing to `target`.
    def count_of_possible_combinations(target):
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    # Same recursion, memoized through an explicit dp_array.
    def count_of_possible_combinations_with_dp_array(target, dp_array):
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    # Iterative table fill: dp_array[i] counts ordered sequences summing to i.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
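# Worked trace of the bottom-up table for array=[1, 2, 5], target=5 (sequences
# are ordered, so [1, 2, 2] and [2, 1, 2] count separately):
#   dp[0]=1, dp[1]=1, dp[2]=2, dp[3]=3, dp[4]=5, dp[5]=dp[4]+dp[3]+dp[0]=9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9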
263
0
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    # Given any two of the three quantities (pass 0 for the unknown one),
    # solve sigma = q * n * mu for the third.
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
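# Usage sketch: pass 0 for the unknown quantity and the function solves for it.
name, value = electric_conductivity(conductivity=25, electron_conc=100, mobility=0)
print(name, value)  # mobility, 25 / (100 * 1.6021e-19) ~= 1.5605e+18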
191
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    # Yields Fibonacci numbers from the second term onwards: 1, 2, 3, 5, 8, ...
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    # 1-based index of the first Fibonacci term to contain n digits.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
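# Worked example: with the 1-indexed convention F(1) = F(2) = 1, the first
# 3-digit term is F(12) = 144, so:
assert solution(3) == 12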
191
1
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
108
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
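# To run just this parametrized test (a sketch; the file path is an assumption
# about where this module lives in the datasets repo):
#
#   pytest tests/test_metric_common.py -k "metric_deprecation_warning" -q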
598
0
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = DebertaVaTokenizer lowercase_ = DebertaVaTokenizerFast lowercase_ = True lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__: str =DebertaVaTokenizer(UpperCAmelCase_ , unk_token="<unk>") tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : List[str]) ->Any: '''simple docstring''' lowerCamelCase__: Union[str, Any] ="this is a test" lowerCamelCase__: Union[str, Any] ="this is a test" return input_text, output_text def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] ="<pad>" lowerCamelCase__: str =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : str) ->Tuple: '''simple docstring''' lowerCamelCase__: List[str] =list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<pad>") self.assertEqual(vocab_keys[1] , "<unk>") self.assertEqual(vocab_keys[-1] , "[PAD]") self.assertEqual(len(UpperCAmelCase_) , 30_001) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30_000) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->int: '''simple docstring''' lowerCamelCase__: str =" \tHeLLo!how \n Are yoU? " lowerCamelCase__: str =["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on lowerCamelCase__: Optional[int] =DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_) lowerCamelCase__: Optional[int] =tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict =DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_) lowerCamelCase__: Dict =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.") def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[str]: '''simple docstring''' pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.") def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]: '''simple docstring''' lowerCamelCase__: List[Any] ="I was born in 92000, and this is falsé." 
lowerCamelCase__: Union[str, Any] =["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on lowerCamelCase__: List[str] =DebertaVaTokenizer(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: int =tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Any =DebertaVaTokenizerFast(UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: int =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : str) ->List[str]: '''simple docstring''' lowerCamelCase__: Optional[Any] ="I was born in 92000, and this is falsé." lowerCamelCase__: Optional[int] =["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on lowerCamelCase__: Any =DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Any =tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[Any] =DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]: '''simple docstring''' lowerCamelCase__: Dict ="I was born in 92000, and this is falsé." lowerCamelCase__: Union[str, Any] =["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on lowerCamelCase__: int =DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Optional[int] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: Optional[Any] ="I was born in 92000, and this is falsé." 
lowerCamelCase__: Dict =["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on lowerCamelCase__: Dict =DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Optional[int] =tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: int =DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Optional[int] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str: '''simple docstring''' lowerCamelCase__: List[str] =" \tHeLLo!how \n Are yoU? " lowerCamelCase__: Union[str, Any] =["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on lowerCamelCase__: Optional[int] =DebertaVaTokenizer(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Optional[Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =DebertaVaTokenizerFast(UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , split_by_punct=UpperCAmelCase_) lowerCamelCase__: Dict =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Dict =self.get_tokenizer() lowerCamelCase__: Optional[int] =self.get_rust_tokenizer() lowerCamelCase__: Union[str, Any] ="I was born in 92000, and this is falsé." 
lowerCamelCase__: Optional[Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) lowerCamelCase__: Union[str, Any] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) lowerCamelCase__: int =rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict =self.get_rust_tokenizer() lowerCamelCase__: Optional[int] =tokenizer.encode(UpperCAmelCase_) lowerCamelCase__: Optional[int] =rust_tokenizer.encode(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Optional[Any] ="This is a test" lowerCamelCase__: Optional[Any] =[13, 1, 4_398, 25, 21, 1_289] lowerCamelCase__: Tuple =["▁", "T", "his", "▁is", "▁a", "▁test"] lowerCamelCase__: str =["▁", "<unk>", "his", "▁is", "▁a", "▁test"] lowerCamelCase__: List[Any] =DebertaVaTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) lowerCamelCase__: Optional[int] =DebertaVaTokenizerFast(UpperCAmelCase_ , keep_accents=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict =tokenizer.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Dict =rust_tokenizer.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: List[str] =rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) # fmt: off lowerCamelCase__: Union[str, Any] ="I was born in 92000, and this is falsé." 
lowerCamelCase__: List[str] =[13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] lowerCamelCase__: List[Any] =["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] lowerCamelCase__: List[str] =["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on lowerCamelCase__: Tuple =tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Any =rust_tokenizer.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: str =rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]: '''simple docstring''' lowerCamelCase__: int =DebertaVaTokenizer(UpperCAmelCase_) lowerCamelCase__: Optional[int] =tokenizer.encode("sequence builders") lowerCamelCase__: Dict =tokenizer.encode("multi-sequence build") lowerCamelCase__: Union[str, Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCAmelCase_) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCAmelCase_ , ) @slow def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any: '''simple docstring''' lowerCamelCase__: Dict ={"input_ids": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
437
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
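# Minimal usage sketch (assumes the classes are importable from an installed
# `transformers`, which is where this module normally lives):
#
#   from transformers import XLMRobertaXLConfig, XLMRobertaXLModel
#   config = XLMRobertaXLConfig(num_hidden_layers=4, hidden_size=128, num_attention_heads=4)
#   model = XLMRobertaXLModel(config)  # randomly initialized, xlm-roberta-xl architecture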
437
1