Dataset schema (each row below carries these five fields):

    code                     string   length 82 to 54.1k
    code_codestyle           int64    0 to 699
    style_context            string   length 111 to 35.6k
    style_context_codestyle  int64    0 to 699
    label                    int64    0 to 1
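For orientation, here is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset path "user/code-style-pairs" is a placeholder assumption, not the actual source of the rows below.

    # Minimal sketch: load a dataset with the schema above and inspect one row.
    # "user/code-style-pairs" is a hypothetical path, not the real dataset name.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")

    row = ds[0]
    print(len(row["code"]))                # string, length 82 to 54.1k
    print(row["code_codestyle"])           # int64 in 0 to 699
    print(len(row["style_context"]))       # string, length 111 to 35.6k
    print(row["style_context_codestyle"])  # int64 in 0 to 699
    print(row["label"])                    # int64, 0 or 1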
Row 1
code:
    from math import sqrt


    def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
        """simple docstring"""
        if 1 < number < 4:
            # 2 and 3 are primes
            return True
        elif number < 2 or number % 2 == 0 or number % 3 == 0:
            # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
            return False
        # All primes number are in format of 6k +/- 1
        for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True


    def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
        """simple docstring"""
        _UpperCAmelCase = 0
        _UpperCAmelCase = 1
        while count != nth and number < 3:
            number += 1
            if is_prime(SCREAMING_SNAKE_CASE_ ):
                count += 1
        while count != nth:
            number += 2
            if is_prime(SCREAMING_SNAKE_CASE_ ):
                count += 1
        return number


    if __name__ == "__main__":
        print(f'''{solution() = }''')
code_codestyle: 32
UpperCAmelCase_ = { "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/" } # Exclamation mark is not in ITU-R recommendation # fmt: on UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()} def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return "".join(REVERSE_DICT[char] for char in message.split() ) def A__ ( ) -> None: """simple docstring""" _UpperCAmelCase = '''Morse code here!''' print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
style_context_codestyle: 32
label: 1
Row 2
code:
    import baseaa


    def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
        """simple docstring"""
        return baseaa.baaencode(string.encode('''utf-8''' ) )


    def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
        """simple docstring"""
        return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )


    if __name__ == "__main__":
        UpperCAmelCase_ = "Hello World!"
        UpperCAmelCase_ = baseaa_encode(test)
        print(encoded)
        UpperCAmelCase_ = baseaa_decode(encoded)
        print(decoded)
code_codestyle: 32
style_context:
    import gc
    import unittest

    import numpy as np
    import torch

    from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
    from diffusers.utils import slow, torch_device
    from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

    from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    from ..test_pipelines_common import PipelineTesterMixin


    enable_full_determinism()


    class __UpperCamelCase ( A__ , unittest.TestCase ):
        __A : Any = DanceDiffusionPipeline
        __A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
        __A : Tuple = PipelineTesterMixin.required_optional_params - {
            """callback""",
            """latents""",
            """callback_steps""",
            """output_type""",
            """num_images_per_prompt""",
        }
        __A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
        __A : List[str] = False
        __A : str = False

        def UpperCamelCase( self ):
            torch.manual_seed(0 )
            _UpperCAmelCase = UNetaDModel(
                block_out_channels=(32, 32, 64) ,
                extra_in_channels=16 ,
                sample_size=512 ,
                sample_rate=16000 ,
                in_channels=2 ,
                out_channels=2 ,
                flip_sin_to_cos=_UpperCamelCase ,
                use_timestep_embedding=_UpperCamelCase ,
                time_embedding_type='''fourier''' ,
                mid_block_type='''UNetMidBlock1D''' ,
                down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') ,
                up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') ,
            )
            _UpperCAmelCase = IPNDMScheduler()

            _UpperCAmelCase = {
                '''unet''': unet,
                '''scheduler''': scheduler,
            }
            return components

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
            if str(_UpperCamelCase ).startswith('''mps''' ):
                _UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
            else:
                _UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
            _UpperCAmelCase = {
                '''batch_size''': 1,
                '''generator''': generator,
                '''num_inference_steps''': 4,
            }
            return inputs

        def UpperCamelCase( self ):
            _UpperCAmelCase = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
            _UpperCAmelCase = self.get_dummy_components()
            _UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
            _UpperCAmelCase = pipe.to(_UpperCamelCase )
            pipe.set_progress_bar_config(disable=_UpperCamelCase )

            _UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
            _UpperCAmelCase = pipe(**_UpperCamelCase )
            _UpperCAmelCase = output.audios

            _UpperCAmelCase = audio[0, -3:, -3:]

            assert audio.shape == (1, 2, components["unet"].sample_size)
            _UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
            assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

        @skip_mps
        def UpperCamelCase( self ):
            return super().test_save_load_local()

        @skip_mps
        def UpperCamelCase( self ):
            return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

        @skip_mps
        def UpperCamelCase( self ):
            return super().test_save_load_optional_components()

        @skip_mps
        def UpperCamelCase( self ):
            return super().test_attention_slicing_forward_pass()

        def UpperCamelCase( self ):
            super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


    @slow
    @require_torch_gpu
    class __UpperCamelCase ( unittest.TestCase ):
        def UpperCamelCase( self ):
            # clean up the VRAM after each test
            super().tearDown()
            gc.collect()
            torch.cuda.empty_cache()

        def UpperCamelCase( self ):
            _UpperCAmelCase = torch_device

            _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
            _UpperCAmelCase = pipe.to(_UpperCamelCase )
            pipe.set_progress_bar_config(disable=_UpperCamelCase )

            _UpperCAmelCase = torch.manual_seed(0 )
            _UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
            _UpperCAmelCase = output.audios

            _UpperCAmelCase = audio[0, -3:, -3:]

            assert audio.shape == (1, 2, pipe.unet.sample_size)
            _UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
            assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

        def UpperCamelCase( self ):
            _UpperCAmelCase = torch_device

            _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
            _UpperCAmelCase = pipe.to(_UpperCamelCase )
            pipe.set_progress_bar_config(disable=_UpperCamelCase )

            _UpperCAmelCase = torch.manual_seed(0 )
            _UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
            _UpperCAmelCase = output.audios

            _UpperCAmelCase = audio[0, -3:, -3:]

            assert audio.shape == (1, 2, pipe.unet.sample_size)
            _UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
            assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
style_context_codestyle: 32
label: 1
Row 3
code:
    import os
    import tempfile
    from functools import partial
    from unittest import TestCase
    from unittest.mock import patch

    import datasets
    import datasets.config

    from .utils import require_beam


    class __UpperCamelCase ( datasets.BeamBasedBuilder ):
        def UpperCamelCase( self ):
            return datasets.DatasetInfo(
                features=datasets.Features({'''content''': datasets.Value('''string''' )} ) ,
                supervised_keys=_UpperCamelCase ,
            )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
            import apache_beam as beam

            return pipeline | "Load Examples" >> beam.Create(_UpperCamelCase )


    class __UpperCamelCase ( datasets.BeamBasedBuilder ):
        def UpperCamelCase( self ):
            return datasets.DatasetInfo(
                features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) ,
                supervised_keys=_UpperCamelCase ,
            )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
            ]

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
            import apache_beam as beam

            return pipeline | "Load Examples" >> beam.Create(_UpperCamelCase )


    def A__ ( ) -> Dict:
        """simple docstring"""
        return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]


    def A__ ( ) -> Dict:
        """simple docstring"""
        return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]


    class __UpperCamelCase ( A__ ):
        @require_beam
        def UpperCamelCase( self ):
            _UpperCAmelCase = len(get_test_dummy_examples() )
            with tempfile.TemporaryDirectory() as tmp_cache_dir:
                _UpperCAmelCase = DummyBeamDataset(cache_dir=_UpperCamelCase , beam_runner='''DirectRunner''' )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
                _UpperCAmelCase = builder.as_dataset()
                self.assertEqual(dset['''train'''].num_rows , _UpperCamelCase )
                self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _UpperCamelCase )
                self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
                self.assertDictEqual(
                    dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
                self.assertTrue(
                    os.path.exists(os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
                del dset

        @require_beam
        def UpperCamelCase( self ):
            import apache_beam as beam

            _UpperCAmelCase = beam.io.parquetio.WriteToParquet

            _UpperCAmelCase = len(get_test_dummy_examples() )
            with tempfile.TemporaryDirectory() as tmp_cache_dir:
                _UpperCAmelCase = DummyBeamDataset(cache_dir=_UpperCamelCase , beam_runner='''DirectRunner''' )
                with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                    _UpperCAmelCase = partial(_UpperCamelCase , num_shards=2 )
                    builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            _UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            _UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
                _UpperCAmelCase = builder.as_dataset()
                self.assertEqual(dset['''train'''].num_rows , _UpperCamelCase )
                self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _UpperCamelCase )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
                del dset

        @require_beam
        def UpperCamelCase( self ):
            with tempfile.TemporaryDirectory() as tmp_cache_dir:
                _UpperCAmelCase = DummyBeamDataset(cache_dir=_UpperCamelCase )
                self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

        @require_beam
        def UpperCamelCase( self ):
            _UpperCAmelCase = len(get_test_nested_examples() )
            with tempfile.TemporaryDirectory() as tmp_cache_dir:
                _UpperCAmelCase = NestedBeamDataset(cache_dir=_UpperCamelCase , beam_runner='''DirectRunner''' )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'''{builder.name}-train.arrow''' ) ) )
                self.assertDictEqual(
                    builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
                _UpperCAmelCase = builder.as_dataset()
                self.assertEqual(dset['''train'''].num_rows , _UpperCamelCase )
                self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _UpperCamelCase )
                self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
                self.assertDictEqual(
                    dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
                self.assertTrue(
                    os.path.exists(os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
                del dset
code_codestyle: 32
style_context:
    from collections import OrderedDict

    from ...utils import logging
    from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
    from .configuration_auto import CONFIG_MAPPING_NAMES


    UpperCAmelCase_ = logging.get_logger(__name__)


    UpperCAmelCase_ = OrderedDict(
        [
            # Base model mapping
            ("albert", "FlaxAlbertModel"),
            ("bart", "FlaxBartModel"),
            ("beit", "FlaxBeitModel"),
            ("bert", "FlaxBertModel"),
            ("big_bird", "FlaxBigBirdModel"),
            ("blenderbot", "FlaxBlenderbotModel"),
            ("blenderbot-small", "FlaxBlenderbotSmallModel"),
            ("clip", "FlaxCLIPModel"),
            ("distilbert", "FlaxDistilBertModel"),
            ("electra", "FlaxElectraModel"),
            ("gpt-sw3", "FlaxGPT2Model"),
            ("gpt2", "FlaxGPT2Model"),
            ("gpt_neo", "FlaxGPTNeoModel"),
            ("gptj", "FlaxGPTJModel"),
            ("longt5", "FlaxLongT5Model"),
            ("marian", "FlaxMarianModel"),
            ("mbart", "FlaxMBartModel"),
            ("mt5", "FlaxMT5Model"),
            ("opt", "FlaxOPTModel"),
            ("pegasus", "FlaxPegasusModel"),
            ("regnet", "FlaxRegNetModel"),
            ("resnet", "FlaxResNetModel"),
            ("roberta", "FlaxRobertaModel"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
            ("roformer", "FlaxRoFormerModel"),
            ("t5", "FlaxT5Model"),
            ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
            ("vit", "FlaxViTModel"),
            ("wav2vec2", "FlaxWav2Vec2Model"),
            ("whisper", "FlaxWhisperModel"),
            ("xglm", "FlaxXGLMModel"),
            ("xlm-roberta", "FlaxXLMRobertaModel"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for pre-training mapping
            ("albert", "FlaxAlbertForPreTraining"),
            ("bart", "FlaxBartForConditionalGeneration"),
            ("bert", "FlaxBertForPreTraining"),
            ("big_bird", "FlaxBigBirdForPreTraining"),
            ("electra", "FlaxElectraForPreTraining"),
            ("longt5", "FlaxLongT5ForConditionalGeneration"),
            ("mbart", "FlaxMBartForConditionalGeneration"),
            ("mt5", "FlaxMT5ForConditionalGeneration"),
            ("roberta", "FlaxRobertaForMaskedLM"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
            ("roformer", "FlaxRoFormerForMaskedLM"),
            ("t5", "FlaxT5ForConditionalGeneration"),
            ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
            ("whisper", "FlaxWhisperForConditionalGeneration"),
            ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Masked LM mapping
            ("albert", "FlaxAlbertForMaskedLM"),
            ("bart", "FlaxBartForConditionalGeneration"),
            ("bert", "FlaxBertForMaskedLM"),
            ("big_bird", "FlaxBigBirdForMaskedLM"),
            ("distilbert", "FlaxDistilBertForMaskedLM"),
            ("electra", "FlaxElectraForMaskedLM"),
            ("mbart", "FlaxMBartForConditionalGeneration"),
            ("roberta", "FlaxRobertaForMaskedLM"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
            ("roformer", "FlaxRoFormerForMaskedLM"),
            ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Seq2Seq Causal LM mapping
            ("bart", "FlaxBartForConditionalGeneration"),
            ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
            ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
            ("encoder-decoder", "FlaxEncoderDecoderModel"),
            ("longt5", "FlaxLongT5ForConditionalGeneration"),
            ("marian", "FlaxMarianMTModel"),
            ("mbart", "FlaxMBartForConditionalGeneration"),
            ("mt5", "FlaxMT5ForConditionalGeneration"),
            ("pegasus", "FlaxPegasusForConditionalGeneration"),
            ("t5", "FlaxT5ForConditionalGeneration"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Image-classsification
            ("beit", "FlaxBeitForImageClassification"),
            ("regnet", "FlaxRegNetForImageClassification"),
            ("resnet", "FlaxResNetForImageClassification"),
            ("vit", "FlaxViTForImageClassification"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Causal LM mapping
            ("bart", "FlaxBartForCausalLM"),
            ("bert", "FlaxBertForCausalLM"),
            ("big_bird", "FlaxBigBirdForCausalLM"),
            ("electra", "FlaxElectraForCausalLM"),
            ("gpt-sw3", "FlaxGPT2LMHeadModel"),
            ("gpt2", "FlaxGPT2LMHeadModel"),
            ("gpt_neo", "FlaxGPTNeoForCausalLM"),
            ("gptj", "FlaxGPTJForCausalLM"),
            ("opt", "FlaxOPTForCausalLM"),
            ("roberta", "FlaxRobertaForCausalLM"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
            ("xglm", "FlaxXGLMForCausalLM"),
            ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Sequence Classification mapping
            ("albert", "FlaxAlbertForSequenceClassification"),
            ("bart", "FlaxBartForSequenceClassification"),
            ("bert", "FlaxBertForSequenceClassification"),
            ("big_bird", "FlaxBigBirdForSequenceClassification"),
            ("distilbert", "FlaxDistilBertForSequenceClassification"),
            ("electra", "FlaxElectraForSequenceClassification"),
            ("mbart", "FlaxMBartForSequenceClassification"),
            ("roberta", "FlaxRobertaForSequenceClassification"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
            ("roformer", "FlaxRoFormerForSequenceClassification"),
            ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Question Answering mapping
            ("albert", "FlaxAlbertForQuestionAnswering"),
            ("bart", "FlaxBartForQuestionAnswering"),
            ("bert", "FlaxBertForQuestionAnswering"),
            ("big_bird", "FlaxBigBirdForQuestionAnswering"),
            ("distilbert", "FlaxDistilBertForQuestionAnswering"),
            ("electra", "FlaxElectraForQuestionAnswering"),
            ("mbart", "FlaxMBartForQuestionAnswering"),
            ("roberta", "FlaxRobertaForQuestionAnswering"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
            ("roformer", "FlaxRoFormerForQuestionAnswering"),
            ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Token Classification mapping
            ("albert", "FlaxAlbertForTokenClassification"),
            ("bert", "FlaxBertForTokenClassification"),
            ("big_bird", "FlaxBigBirdForTokenClassification"),
            ("distilbert", "FlaxDistilBertForTokenClassification"),
            ("electra", "FlaxElectraForTokenClassification"),
            ("roberta", "FlaxRobertaForTokenClassification"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
            ("roformer", "FlaxRoFormerForTokenClassification"),
            ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            # Model for Multiple Choice mapping
            ("albert", "FlaxAlbertForMultipleChoice"),
            ("bert", "FlaxBertForMultipleChoice"),
            ("big_bird", "FlaxBigBirdForMultipleChoice"),
            ("distilbert", "FlaxDistilBertForMultipleChoice"),
            ("electra", "FlaxElectraForMultipleChoice"),
            ("roberta", "FlaxRobertaForMultipleChoice"),
            ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
            ("roformer", "FlaxRoFormerForMultipleChoice"),
            ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            ("bert", "FlaxBertForNextSentencePrediction"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
            ("whisper", "FlaxWhisperForConditionalGeneration"),
        ]
    )

    UpperCAmelCase_ = OrderedDict(
        [
            ("whisper", "FlaxWhisperForAudioClassification"),
        ]
    )

    UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
    UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
    UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
    UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
    )
    UpperCAmelCase_ = _LazyAutoMapping(
        CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
    )


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : List[str] = FLAX_MODEL_MAPPING


    UpperCAmelCase_ = auto_class_update(FlaxAutoModel)


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING


    UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


    UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING


    UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


    UpperCAmelCase_ = auto_class_update(
        FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
    )


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


    UpperCAmelCase_ = auto_class_update(
        FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
    )


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


    UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


    UpperCAmelCase_ = auto_class_update(
        FlaxAutoModelForTokenClassification, head_doc="token classification"
    )


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


    UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


    UpperCAmelCase_ = auto_class_update(
        FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
    )


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


    UpperCAmelCase_ = auto_class_update(
        FlaxAutoModelForImageClassification, head_doc="image classification"
    )


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


    UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


    class __UpperCamelCase ( _BaseAutoModelClass ):
        __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


    UpperCAmelCase_ = auto_class_update(
        FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
    )
style_context_codestyle: 32
label: 1
Row 4
code:
    import enum
    import os
    from hashlib import shaaaa
    from typing import Optional

    from .. import config
    from .logging import get_logger


    UpperCAmelCase_ = get_logger(__name__)


    class __UpperCamelCase ( enum.Enum ):
        __A : int = """all_checks"""
        __A : Tuple = """basic_checks"""
        __A : List[Any] = """no_checks"""


    class __UpperCamelCase ( A__ ):
        pass


    class __UpperCamelCase ( A__ ):
        pass


    class __UpperCamelCase ( A__ ):
        pass


    class __UpperCamelCase ( A__ ):
        pass


    def A__ ( SCREAMING_SNAKE_CASE_ : Optional[dict] , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : Dict=None ) -> Union[str, Any]:
        """simple docstring"""
        if expected_checksums is None:
            logger.info('''Unable to verify checksums.''' )
            return
        if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
            raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
        if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
            raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
        _UpperCAmelCase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
        _UpperCAmelCase = ''' for ''' + verification_name if verification_name is not None else ''''''
        if len(SCREAMING_SNAKE_CASE_ ) > 0:
            raise NonMatchingChecksumError(
                F'''Checksums didn\'t match{for_verification_name}:\n'''
                F'''{bad_urls}\n'''
                '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
        logger.info('''All the checksums matched successfully''' + for_verification_name )


    class __UpperCamelCase ( A__ ):
        pass


    class __UpperCamelCase ( A__ ):
        pass


    class __UpperCamelCase ( A__ ):
        pass


    class __UpperCamelCase ( A__ ):
        pass


    def A__ ( SCREAMING_SNAKE_CASE_ : Optional[dict] , SCREAMING_SNAKE_CASE_ : dict ) -> int:
        """simple docstring"""
        if expected_splits is None:
            logger.info('''Unable to verify splits sizes.''' )
            return
        if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
            raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
        if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
            raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
        _UpperCAmelCase = [
            {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
            for name in expected_splits
            if expected_splits[name].num_examples != recorded_splits[name].num_examples
        ]
        if len(SCREAMING_SNAKE_CASE_ ) > 0:
            raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE_ ) )
        logger.info('''All the splits matched successfully.''' )


    def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = True ) -> dict:
        """simple docstring"""
        if record_checksum:
            _UpperCAmelCase = shaaaa()
            with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
                for chunk in iter(lambda: f.read(1 << 20 ) , B'''''' ):
                    m.update(SCREAMING_SNAKE_CASE_ )
            _UpperCAmelCase = m.hexdigest()
        else:
            _UpperCAmelCase = None
        return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE_ ), "checksum": checksum}


    def A__ ( SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
        """simple docstring"""
        if dataset_size and config.IN_MEMORY_MAX_SIZE:
            return dataset_size < config.IN_MEMORY_MAX_SIZE
        else:
            return False
code_codestyle: 32
style_context:
    import baseaa


    def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
        """simple docstring"""
        return baseaa.baaencode(string.encode('''utf-8''' ) )


    def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
        """simple docstring"""
        return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )


    if __name__ == "__main__":
        UpperCAmelCase_ = "Hello World!"
        UpperCAmelCase_ = baseaa_encode(test)
        print(encoded)
        UpperCAmelCase_ = baseaa_decode(encoded)
        print(decoded)
style_context_codestyle: 32
label: 1
Row 5
code:
    from collections import defaultdict

    from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


    def A__ ( ) -> Tuple:
        """simple docstring"""
        _UpperCAmelCase , _UpperCAmelCase = 9, 14  # noqa: F841
        _UpperCAmelCase = [
            [0, 1, 4],
            [0, 7, 8],
            [1, 2, 8],
            [7, 8, 7],
            [7, 6, 1],
            [2, 8, 2],
            [8, 6, 6],
            [2, 3, 7],
            [2, 5, 4],
            [6, 5, 2],
            [3, 5, 14],
            [3, 4, 9],
            [5, 4, 10],
            [1, 7, 11],
        ]
        _UpperCAmelCase = defaultdict(SCREAMING_SNAKE_CASE_ )
        for nodea, nodea, cost in edges:
            adjancency[nodea].append([nodea, cost] )
            adjancency[nodea].append([nodea, cost] )
        _UpperCAmelCase = mst(SCREAMING_SNAKE_CASE_ )
        _UpperCAmelCase = [
            [7, 6, 1],
            [2, 8, 2],
            [6, 5, 2],
            [0, 1, 4],
            [2, 5, 4],
            [2, 3, 7],
            [0, 7, 8],
            [3, 4, 9],
        ]
        for answer in expected:
            _UpperCAmelCase = tuple(answer[:2] )
            _UpperCAmelCase = tuple(edge[::-1] )
            assert edge in result or reverse in result
code_codestyle: 32
style_context:
    from typing import Dict, List, Optional, Union

    import numpy as np

    from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
    from ...image_transforms import (
        center_crop,
        get_resize_output_image_size,
        normalize,
        rescale,
        resize,
        to_channel_dimension_format,
    )
    from ...image_utils import (
        IMAGENET_DEFAULT_MEAN,
        IMAGENET_DEFAULT_STD,
        ChannelDimension,
        ImageInput,
        PILImageResampling,
        is_batched,
        to_numpy_array,
        valid_images,
    )
    from ...utils import TensorType, logging


    UpperCAmelCase_ = logging.get_logger(__name__)


    class __UpperCamelCase ( A__ ):
        __A : int = ["""pixel_values"""]

        def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
            super().__init__(**_UpperCamelCase )
            _UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
            _UpperCAmelCase = get_size_dict(_UpperCamelCase )
            _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
            _UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )

            _UpperCAmelCase = do_resize
            _UpperCAmelCase = do_rescale
            _UpperCAmelCase = do_normalize
            _UpperCAmelCase = do_center_crop
            _UpperCAmelCase = crop_size
            _UpperCAmelCase = size
            _UpperCAmelCase = resample
            _UpperCAmelCase = rescale_factor
            _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
            _UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
            _UpperCAmelCase = get_size_dict(_UpperCamelCase )
            if "shortest_edge" in size:
                _UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
                # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
            elif "height" in size and "width" in size:
                _UpperCAmelCase = (size['''height'''], size['''width'''])
            else:
                raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
            return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
            _UpperCAmelCase = get_size_dict(_UpperCamelCase )
            if "height" not in size or "width" not in size:
                raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
            return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
            return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
            return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
            _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
            _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
            _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
            _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
            _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
            _UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
            _UpperCAmelCase = resample if resample is not None else self.resample
            _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
            _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
            _UpperCAmelCase = image_std if image_std is not None else self.image_std

            _UpperCAmelCase = size if size is not None else self.size
            _UpperCAmelCase = get_size_dict(_UpperCamelCase )

            if not is_batched(_UpperCamelCase ):
                _UpperCAmelCase = [images]

            if not valid_images(_UpperCamelCase ):
                raise ValueError(
                    '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                    '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

            if do_resize and size is None:
                raise ValueError('''Size must be specified if do_resize is True.''' )

            if do_center_crop and crop_size is None:
                raise ValueError('''Crop size must be specified if do_center_crop is True.''' )

            if do_rescale and rescale_factor is None:
                raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

            # All transformations expect numpy arrays.
            _UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]

            if do_resize:
                _UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]

            if do_center_crop:
                _UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]

            if do_rescale:
                _UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]

            if do_normalize:
                _UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]

            _UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]

            _UpperCAmelCase = {'''pixel_values''': images}
            return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
style_context_codestyle: 32
label: 1
Row 6
code:
    import unittest

    from transformers import is_torch_available
    from transformers.testing_utils import require_torch, slow, torch_device

    from ...generation.test_utils import GenerationTesterMixin
    from ...test_configuration_common import ConfigTester
    from ...test_modeling_common import ModelTesterMixin, ids_tensor
    from ...test_pipeline_mixin import PipelineTesterMixin


    if is_torch_available():
        import torch

        from transformers import (
            OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OpenAIGPTConfig,
            OpenAIGPTDoubleHeadsModel,
            OpenAIGPTForSequenceClassification,
            OpenAIGPTLMHeadModel,
            OpenAIGPTModel,
        )


    class __UpperCamelCase :
        def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ):
            _UpperCAmelCase = parent
            _UpperCAmelCase = batch_size
            _UpperCAmelCase = seq_length
            _UpperCAmelCase = is_training
            _UpperCAmelCase = use_token_type_ids
            _UpperCAmelCase = use_labels
            _UpperCAmelCase = vocab_size
            _UpperCAmelCase = hidden_size
            _UpperCAmelCase = num_hidden_layers
            _UpperCAmelCase = num_attention_heads
            _UpperCAmelCase = intermediate_size
            _UpperCAmelCase = hidden_act
            _UpperCAmelCase = hidden_dropout_prob
            _UpperCAmelCase = attention_probs_dropout_prob
            _UpperCAmelCase = max_position_embeddings
            _UpperCAmelCase = type_vocab_size
            _UpperCAmelCase = type_sequence_label_size
            _UpperCAmelCase = initializer_range
            _UpperCAmelCase = num_labels
            _UpperCAmelCase = num_choices
            _UpperCAmelCase = scope
            _UpperCAmelCase = self.vocab_size - 1

        def UpperCamelCase( self ):
            _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            _UpperCAmelCase = None
            if self.use_token_type_ids:
                _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
            _UpperCAmelCase = None
            _UpperCAmelCase = None
            _UpperCAmelCase = None
            if self.use_labels:
                _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
                _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
                _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
            _UpperCAmelCase = OpenAIGPTConfig(
                vocab_size=self.vocab_size ,
                n_embd=self.hidden_size ,
                n_layer=self.num_hidden_layers ,
                n_head=self.num_attention_heads ,
                n_positions=self.max_position_embeddings ,
                pad_token_id=self.pad_token_id ,
            )
            _UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
            return (
                config,
                input_ids,
                head_mask,
                token_type_ids,
                sequence_labels,
                token_labels,
                choice_labels,
            )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
            _UpperCAmelCase = OpenAIGPTModel(config=_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            _UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , head_mask=_UpperCamelCase )
            _UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
            _UpperCAmelCase = model(_UpperCamelCase )
            self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
            _UpperCAmelCase = OpenAIGPTLMHeadModel(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            _UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
            self.parent.assertEqual(result.loss.shape , () )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
            _UpperCAmelCase = OpenAIGPTDoubleHeadsModel(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            _UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
            self.parent.assertEqual(result.loss.shape , () )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ):
            _UpperCAmelCase = self.num_labels
            _UpperCAmelCase = OpenAIGPTForSequenceClassification(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.prepare_config_and_inputs()
            (
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
            ) = config_and_inputs
            _UpperCAmelCase = {
                '''input_ids''': input_ids,
                '''token_type_ids''': token_type_ids,
                '''head_mask''': head_mask,
            }
            return config, inputs_dict


    @require_torch
    class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
        __A : List[str] = (
            (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
            if is_torch_available()
            else ()
        )
        __A : Any = (
            (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
        )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
        __A : str = (
            {
                """feature-extraction""": OpenAIGPTModel,
                """text-classification""": OpenAIGPTForSequenceClassification,
                """text-generation""": OpenAIGPTLMHeadModel,
                """zero-shot""": OpenAIGPTForSequenceClassification,
            }
            if is_torch_available()
            else {}
        )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
            if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
                # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
                # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
                # tiny config could not be created.
                return True
            return False

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
            _UpperCAmelCase = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
            if return_labels:
                if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                    _UpperCAmelCase = torch.zeros(
                        (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,
                        dtype=torch.long ,
                        device=_UpperCamelCase ,
                    )
                    _UpperCAmelCase = inputs_dict['''labels''']
                    _UpperCAmelCase = inputs_dict['''labels''']
                    _UpperCAmelCase = torch.zeros(
                        (self.model_tester.batch_size, self.model_tester.num_choices) ,
                        dtype=torch.long ,
                        device=_UpperCamelCase ,
                    )
                    _UpperCAmelCase = torch.zeros(
                        self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
            return inputs_dict

        def UpperCamelCase( self ):
            _UpperCAmelCase = OpenAIGPTModelTester(self )
            _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , n_embd=37 )

        def UpperCamelCase( self ):
            self.config_tester.run_common_tests()

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
            self.model_tester.create_and_check_openai_gpt_model(*_UpperCamelCase )

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
            self.model_tester.create_and_check_lm_head_model(*_UpperCamelCase )

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
            self.model_tester.create_and_check_double_lm_head_model(*_UpperCamelCase )

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
            self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_UpperCamelCase )

        @slow
        def UpperCamelCase( self ):
            for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
                _UpperCAmelCase = OpenAIGPTModel.from_pretrained(_UpperCamelCase )
                self.assertIsNotNone(_UpperCamelCase )


    @require_torch
    class __UpperCamelCase ( unittest.TestCase ):
        @slow
        def UpperCamelCase( self ):
            _UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
            model.to(_UpperCamelCase )
            _UpperCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_UpperCamelCase )  # the president is
            _UpperCAmelCase = [
                481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
                244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
            ]  # the president is a very good man. " \n " i\'m sure he is, " said the
            _UpperCAmelCase = model.generate(_UpperCamelCase , do_sample=_UpperCamelCase )
            self.assertListEqual(output_ids[0].tolist() , _UpperCamelCase )
code_codestyle: 32
style_context:
    from ..utils import DummyObject, requires_backends


    class __UpperCamelCase ( metaclass=A__ ):
        __A : str = ["""torch""", """scipy"""]

        def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
            requires_backends(self , ['''torch''', '''scipy'''] )

        @classmethod
        def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
            requires_backends(cls , ['''torch''', '''scipy'''] )

        @classmethod
        def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
            requires_backends(cls , ['''torch''', '''scipy'''] )
style_context_codestyle: 32
label: 1
Row 7
code:
    from typing import TYPE_CHECKING

    from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


    UpperCAmelCase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        UpperCAmelCase_ = ["YolosFeatureExtractor"]
        UpperCAmelCase_ = ["YolosImageProcessor"]

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        UpperCAmelCase_ = [
            "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
            "YolosForObjectDetection",
            "YolosModel",
            "YolosPreTrainedModel",
        ]

    if TYPE_CHECKING:
        from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

        try:
            if not is_vision_available():
                raise OptionalDependencyNotAvailable()
        except OptionalDependencyNotAvailable:
            pass
        else:
            from .feature_extraction_yolos import YolosFeatureExtractor
            from .image_processing_yolos import YolosImageProcessor

        try:
            if not is_torch_available():
                raise OptionalDependencyNotAvailable()
        except OptionalDependencyNotAvailable:
            pass
        else:
            from .modeling_yolos import (
                YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
                YolosForObjectDetection,
                YolosModel,
                YolosPreTrainedModel,
            )

    else:
        import sys

        UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 32
style_context:
    def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
        """simple docstring"""
        _UpperCAmelCase = [0 for i in range(n + 1 )]
        _UpperCAmelCase = 1
        _UpperCAmelCase = 1
        for i in range(2 , int(n**0.5 ) + 1 ):
            if primality_list[i] == 0:
                for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ):
                    _UpperCAmelCase = 1
        _UpperCAmelCase = 0
        for i in range(SCREAMING_SNAKE_CASE_ ):
            if primality_list[i] == 0:
                sum_of_primes += i
        return sum_of_primes


    if __name__ == "__main__":
        print(f'''{solution() = }''')
style_context_codestyle: 32
label: 1
Row 8
code:
    from ...configuration_utils import PretrainedConfig
    from ...utils import logging


    UpperCAmelCase_ = logging.get_logger(__name__)

    UpperCAmelCase_ = {
        "SCUT-DLVCLab/lilt-roberta-en-base": (
            "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
        ),
    }


    class __UpperCamelCase ( A__ ):
        __A : List[Any] = """lilt"""

        def __init__( self , _UpperCamelCase=30522 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=3072 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=0 , _UpperCamelCase="absolute" , _UpperCamelCase=None , _UpperCamelCase=4 , _UpperCamelCase=1024 , **_UpperCamelCase , ):
            super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
            _UpperCAmelCase = vocab_size
            _UpperCAmelCase = hidden_size
            _UpperCAmelCase = num_hidden_layers
            _UpperCAmelCase = num_attention_heads
            _UpperCAmelCase = hidden_act
            _UpperCAmelCase = intermediate_size
            _UpperCAmelCase = hidden_dropout_prob
            _UpperCAmelCase = attention_probs_dropout_prob
            _UpperCAmelCase = max_position_embeddings
            _UpperCAmelCase = type_vocab_size
            _UpperCAmelCase = initializer_range
            _UpperCAmelCase = layer_norm_eps
            _UpperCAmelCase = position_embedding_type
            _UpperCAmelCase = classifier_dropout
            _UpperCAmelCase = channel_shrink_ratio
            _UpperCAmelCase = max_ad_position_embeddings
code_codestyle: 32
style_context:
    import warnings

    from ...utils import logging
    from .image_processing_glpn import GLPNImageProcessor


    UpperCAmelCase_ = logging.get_logger(__name__)


    class __UpperCamelCase ( A__ ):
        def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
            warnings.warn(
                '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
                ''' use GLPNImageProcessor instead.''' ,
                _UpperCamelCase ,
            )
            super().__init__(*_UpperCamelCase , **_UpperCamelCase )
style_context_codestyle: 32
label: 1
Row 9
code:
    import logging

    from transformers import PretrainedConfig


    UpperCAmelCase_ = logging.getLogger(__name__)

    UpperCAmelCase_ = {
        "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
    }


    class __UpperCamelCase ( A__ ):
        __A : Tuple = """bertabs"""

        def __init__( self , _UpperCamelCase=30522 , _UpperCamelCase=512 , _UpperCamelCase=6 , _UpperCamelCase=512 , _UpperCamelCase=8 , _UpperCamelCase=512 , _UpperCamelCase=0.2 , _UpperCamelCase=6 , _UpperCamelCase=768 , _UpperCamelCase=8 , _UpperCamelCase=2048 , _UpperCamelCase=0.2 , **_UpperCamelCase , ):
            super().__init__(**_UpperCamelCase )
            _UpperCAmelCase = vocab_size
            _UpperCAmelCase = max_pos
            _UpperCAmelCase = enc_layers
            _UpperCAmelCase = enc_hidden_size
            _UpperCAmelCase = enc_heads
            _UpperCAmelCase = enc_ff_size
            _UpperCAmelCase = enc_dropout
            _UpperCAmelCase = dec_layers
            _UpperCAmelCase = dec_hidden_size
            _UpperCAmelCase = dec_heads
            _UpperCAmelCase = dec_ff_size
            _UpperCAmelCase = dec_dropout
code_codestyle: 32
style_context:
    from typing import List, Optional, Union

    from ...processing_utils import ProcessorMixin
    from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
    from ...utils import TensorType


    class __UpperCamelCase ( A__ ):
        __A : Dict = ["""image_processor""", """tokenizer"""]
        __A : List[str] = """BridgeTowerImageProcessor"""
        __A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

        def __init__( self , _UpperCamelCase , _UpperCamelCase ):
            super().__init__(_UpperCamelCase , _UpperCamelCase )

        def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
            _UpperCAmelCase = self.tokenizer(
                text=_UpperCamelCase ,
                add_special_tokens=_UpperCamelCase ,
                padding=_UpperCamelCase ,
                truncation=_UpperCamelCase ,
                max_length=_UpperCamelCase ,
                stride=_UpperCamelCase ,
                pad_to_multiple_of=_UpperCamelCase ,
                return_token_type_ids=_UpperCamelCase ,
                return_attention_mask=_UpperCamelCase ,
                return_overflowing_tokens=_UpperCamelCase ,
                return_special_tokens_mask=_UpperCamelCase ,
                return_offsets_mapping=_UpperCamelCase ,
                return_length=_UpperCamelCase ,
                verbose=_UpperCamelCase ,
                return_tensors=_UpperCamelCase ,
                **_UpperCamelCase ,
            )
            # add pixel_values + pixel_mask
            _UpperCAmelCase = self.image_processor(
                _UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
            encoding.update(_UpperCamelCase )
            return encoding

        def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
            return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )

        def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
            return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )

        @property
        def UpperCamelCase( self ):
            _UpperCAmelCase = self.tokenizer.model_input_names
            _UpperCAmelCase = self.image_processor.model_input_names
            return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
style_context_codestyle: 32
label: 1
Row 10
code:
    import unittest

    from transformers import BertGenerationConfig, is_torch_available
    from transformers.testing_utils import require_torch, slow, torch_device

    from ...generation.test_utils import GenerationTesterMixin
    from ...test_configuration_common import ConfigTester
    from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
    from ...test_pipeline_mixin import PipelineTesterMixin


    if is_torch_available():
        import torch

        from transformers import BertGenerationDecoder, BertGenerationEncoder


    class __UpperCamelCase :
        def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=50 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=None , ):
            _UpperCAmelCase = parent
            _UpperCAmelCase = batch_size
            _UpperCAmelCase = seq_length
            _UpperCAmelCase = is_training
            _UpperCAmelCase = use_input_mask
            _UpperCAmelCase = vocab_size
            _UpperCAmelCase = hidden_size
            _UpperCAmelCase = num_hidden_layers
            _UpperCAmelCase = num_attention_heads
            _UpperCAmelCase = intermediate_size
            _UpperCAmelCase = hidden_act
            _UpperCAmelCase = hidden_dropout_prob
            _UpperCAmelCase = attention_probs_dropout_prob
            _UpperCAmelCase = max_position_embeddings
            _UpperCAmelCase = initializer_range
            _UpperCAmelCase = use_labels
            _UpperCAmelCase = scope

        def UpperCamelCase( self ):
            _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            _UpperCAmelCase = None
            if self.use_input_mask:
                _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
            if self.use_labels:
                _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            _UpperCAmelCase = self.get_config()
            return config, input_ids, input_mask, token_labels

        def UpperCamelCase( self ):
            return BertGenerationConfig(
                vocab_size=self.vocab_size ,
                hidden_size=self.hidden_size ,
                num_hidden_layers=self.num_hidden_layers ,
                num_attention_heads=self.num_attention_heads ,
                intermediate_size=self.intermediate_size ,
                hidden_act=self.hidden_act ,
                hidden_dropout_prob=self.hidden_dropout_prob ,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
                max_position_embeddings=self.max_position_embeddings ,
                is_decoder=_UpperCamelCase ,
                initializer_range=self.initializer_range ,
            )

        def UpperCamelCase( self ):
            (
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
            ) = self.prepare_config_and_inputs()
            _UpperCAmelCase = True
            _UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
            _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
            return (
                config,
                input_ids,
                input_mask,
                token_labels,
                encoder_hidden_states,
                encoder_attention_mask,
            )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ):
            _UpperCAmelCase = BertGenerationEncoder(config=_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            _UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
            _UpperCAmelCase = model(_UpperCamelCase )
            self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ):
            _UpperCAmelCase = True
            _UpperCAmelCase = BertGenerationEncoder(config=_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            _UpperCAmelCase = model(
                _UpperCamelCase ,
                attention_mask=_UpperCamelCase ,
                encoder_hidden_states=_UpperCamelCase ,
                encoder_attention_mask=_UpperCamelCase ,
            )
            _UpperCAmelCase = model(
                _UpperCamelCase ,
                attention_mask=_UpperCamelCase ,
                encoder_hidden_states=_UpperCamelCase ,
            )
            self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ):
            _UpperCAmelCase = True
            _UpperCAmelCase = True
            _UpperCAmelCase = BertGenerationDecoder(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()

            # first forward pass
            _UpperCAmelCase = model(
                _UpperCamelCase ,
                attention_mask=_UpperCamelCase ,
                encoder_hidden_states=_UpperCamelCase ,
                encoder_attention_mask=_UpperCamelCase ,
                use_cache=_UpperCamelCase ,
            )
            _UpperCAmelCase = outputs.past_key_values

            # create hypothetical multiple next token and extent to next_input_ids
            _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
            _UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )

            # append to next input_ids and
            _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
            _UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )

            _UpperCAmelCase = model(
                _UpperCamelCase ,
                attention_mask=_UpperCamelCase ,
                encoder_hidden_states=_UpperCamelCase ,
                encoder_attention_mask=_UpperCamelCase ,
                output_hidden_states=_UpperCamelCase ,
            )['''hidden_states'''][0]
            _UpperCAmelCase = model(
                _UpperCamelCase ,
                attention_mask=_UpperCamelCase ,
                encoder_hidden_states=_UpperCamelCase ,
                encoder_attention_mask=_UpperCamelCase ,
                past_key_values=_UpperCamelCase ,
                output_hidden_states=_UpperCamelCase ,
            )['''hidden_states'''][0]

            # select random slice
            _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
            _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
            _UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()

            self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

            # test that outputs are equal for slice
            self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )

        def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , ):
            _UpperCAmelCase = BertGenerationDecoder(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            _UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

        def UpperCamelCase( self ):
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
            _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
            return config, inputs_dict


    @require_torch
    class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
        __A : Dict = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
        __A : Tuple = (BertGenerationDecoder,) if is_torch_available() else ()
        __A : Tuple = (
            {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
            if is_torch_available()
            else {}
        )

        def UpperCamelCase( self ):
            _UpperCAmelCase = BertGenerationEncoderTester(self )
            _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )

        def UpperCamelCase( self ):
            self.config_tester.run_common_tests()

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
            self.model_tester.create_and_check_model(*_UpperCamelCase )

        def UpperCamelCase( self ):
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
            _UpperCAmelCase = '''bert'''
            self.model_tester.create_and_check_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
            self.model_tester.create_and_check_model_as_decoder(*_UpperCamelCase )

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
            self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCamelCase )

        def UpperCamelCase( self ):
            # This regression test was failing with PyTorch < 1.3
            (
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
                ( _UpperCAmelCase ) ,
            ) = self.model_tester.prepare_config_and_inputs_for_decoder()
            _UpperCAmelCase = None
            self.model_tester.create_and_check_model_as_decoder(
                _UpperCamelCase ,
                _UpperCamelCase ,
                _UpperCamelCase ,
                _UpperCamelCase ,
                _UpperCamelCase ,
                _UpperCamelCase ,
            )

        def UpperCamelCase( self ):
            _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
            self.model_tester.create_and_check_for_causal_lm(*_UpperCamelCase )

        @slow
        def UpperCamelCase( self ):
            _UpperCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
            self.assertIsNotNone(_UpperCamelCase )


    @require_torch
    class __UpperCamelCase ( unittest.TestCase ):
        @slow
        def UpperCamelCase( self ):
            _UpperCAmelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
            _UpperCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
            with torch.no_grad():
                _UpperCAmelCase = model(_UpperCamelCase )[0]
            _UpperCAmelCase = torch.Size([1, 8, 1024] )
            self.assertEqual(output.shape , _UpperCamelCase )
            _UpperCAmelCase = torch.tensor(
                [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )


    @require_torch
    class __UpperCamelCase ( unittest.TestCase ):
        @slow
        def UpperCamelCase( self ):
            _UpperCAmelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
            _UpperCAmelCase = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
            with torch.no_grad():
                _UpperCAmelCase = model(_UpperCamelCase )[0]
            _UpperCAmelCase = torch.Size([1, 8, 50358] )
            self.assertEqual(output.shape , _UpperCamelCase )
            _UpperCAmelCase = torch.tensor(
                [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
32
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
1
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( A__ ): __A : Dict = ["""image_processor""", """tokenizer"""] __A : List[str] = """BridgeTowerImageProcessor""" __A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , _UpperCamelCase , _UpperCamelCase ): super().__init__(_UpperCamelCase , _UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = self.tokenizer( text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , ) # add pixel_values + pixel_mask _UpperCAmelCase = self.image_processor( _UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase ) encoding.update(_UpperCamelCase ) return encoding def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase ) @property def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer.model_input_names _UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __UpperCamelCase ( A__ ): __A : Any = """biogpt""" def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = scale_embedding _UpperCAmelCase = use_cache _UpperCAmelCase = layerdrop _UpperCAmelCase = activation_dropout super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
32
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = { "configuration_instructblip": [ "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "InstructBlipConfig", "InstructBlipQFormerConfig", "InstructBlipVisionConfig", ], "processing_instructblip": ["InstructBlipProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "InstructBlipQFormerModel", "InstructBlipPreTrainedModel", "InstructBlipForConditionalGeneration", "InstructBlipVisionModel", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
from typing import List from .keymap import KEYMAP, get_character def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += [key] setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : Any ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += keys setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator class __UpperCamelCase ( A__ ): def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if not hasattr(_UpperCamelCase , '''key_handler''' ): setattr(_UpperCamelCase , '''key_handler''' , {} ) setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): _UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] ) for key in handled_keys: _UpperCAmelCase = value return new_cls @staticmethod def UpperCamelCase( cls ): _UpperCAmelCase = get_character() if char != KEYMAP["undefined"]: _UpperCAmelCase = ord(_UpperCamelCase ) _UpperCAmelCase = cls.key_handler.get(_UpperCamelCase ) if handler: _UpperCAmelCase = char return handler(cls ) else: return None def A__ ( cls : Union[str, Any] ) -> Any: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
32
1
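# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# A minimal, runnable restatement of the key-handler decorator pattern in the
# keymap snippet above: each decorator records the keys a function handles by
# accumulating them on a `handle_key` attribute, so a metaclass or registry can
# later build a key -> handler table. All names here are readability assumptions.
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator

def mark_multiple(*keys: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator

@mark("q")
@mark_multiple("j", "k")
def on_key():
    return "handled"

# The inner decorator runs first, so the attribute accumulates across decorators.
assert sorted(on_key.handle_key) == ["j", "k", "q"]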
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> list[str]: """Return every character n-gram of length ``ngram_size`` contained in the sentence, in order.""" return [sentence[i : i + ngram_size] for i in range(len(SCREAMING_SNAKE_CASE_ ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
32
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = scope _UpperCAmelCase = range_bbox def UpperCamelCase( self ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _UpperCAmelCase = bbox[i, j, 3] _UpperCAmelCase = bbox[i, j, 1] _UpperCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: _UpperCAmelCase = bbox[i, j, 2] _UpperCAmelCase = bbox[i, j, 0] _UpperCAmelCase = t _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase( self ): return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) 
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = LiltModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model( _UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model( _UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase( self ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ): __A : Dict = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __A : Optional[Any] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __A : List[Any] = False __A : Optional[int] = False def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return True def UpperCamelCase( self ): _UpperCAmelCase = LiltModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 ) def UpperCamelCase( self ): self.config_tester.run_common_tests() def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() 
for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase ) @slow def UpperCamelCase( self ): for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @require_torch @slow class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): _UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase ) _UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase ) _UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase ) _UpperCAmelCase = torch.Size([1, 2, 768] ) _UpperCAmelCase = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , ) self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) )
32
1
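# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# A readable, self-contained version of the character n-gram helper a few rows
# above (the one-liner returning sentence[i : i + ngram_size] slices). The
# function and parameter names are assumptions made for readability.
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    # Slide a window of width `ngram_size` across the sentence.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]
assert create_ngram("ab", 3) == []  # a window longer than the input yields nothing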
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class __UpperCamelCase ( unittest.TestCase ): @slow def UpperCamelCase( self ): _UpperCAmelCase = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained('''xlm-roberta-base''' ) _UpperCAmelCase = '''The dog is cute and lives in the garden house''' _UpperCAmelCase = jnp.array([tokenizer.encode(_UpperCamelCase )] ) _UpperCAmelCase = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim _UpperCAmelCase = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) _UpperCAmelCase = model(_UpperCamelCase )['''last_hidden_state'''] self.assertEqual(output.shape , _UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , _UpperCamelCase , atol=1e-3 ) )
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", } class __UpperCamelCase ( A__ ): __A : Tuple = """rwkv""" __A : Any = {"""max_position_embeddings""": """context_length"""} def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = context_length _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size _UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = rescale_every _UpperCAmelCase = use_cache _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id super().__init__( tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
32
1
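# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# The RWKV-style config above derives two sizes when they are not supplied:
# the attention hidden size falls back to hidden_size, and the feed-forward
# intermediate size falls back to 4 * hidden_size. A minimal restatement of
# that defaulting logic (the function name is an assumption):
def resolve_rwkv_sizes(hidden_size: int,
                       attention_hidden_size: int | None = None,
                       intermediate_size: int | None = None) -> tuple[int, int]:
    attn = attention_hidden_size if attention_hidden_size is not None else hidden_size
    ffn = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attn, ffn

assert resolve_rwkv_sizes(1024) == (1024, 4096)
assert resolve_rwkv_sizes(1024, intermediate_size=2048) == (1024, 2048)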
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def A__ ( SCREAMING_SNAKE_CASE_ : Namespace ) -> Optional[int]: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) UpperCAmelCase_ = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class __UpperCamelCase ( A__ ): @staticmethod def UpperCamelCase( _UpperCamelCase ): _UpperCAmelCase = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=_UpperCamelCase , required=_UpperCamelCase , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=_UpperCamelCase , required=_UpperCamelCase , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=_UpperCamelCase , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=_UpperCamelCase , default=_UpperCamelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=_UpperCamelCase ) def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , ): _UpperCAmelCase = logging.get_logger('''transformers-cli/converting''' ) self._logger.info(f'''Loading model {model_type}''' ) _UpperCAmelCase = model_type _UpperCAmelCase = tf_checkpoint _UpperCAmelCase = pytorch_dump_output _UpperCAmelCase = config _UpperCAmelCase = finetuning_task_name def UpperCamelCase( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_UpperCamelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , 
self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) if "ckpt" in self._tf_checkpoint.lower(): _UpperCAmelCase = self._tf_checkpoint _UpperCAmelCase = '''''' else: _UpperCAmelCase = self._tf_checkpoint _UpperCAmelCase = '''''' convert_transfo_xl_checkpoint_to_pytorch( _UpperCamelCase , self._config , self._pytorch_dump_output , _UpperCamelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_UpperCamelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
32
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str: """simple docstring""" if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" _UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
32
1
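# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# A readable version of the binary-AND helper above: both operands are rendered
# in binary, zero-padded to a common width, and ANDed digit by digit. The name
# is an assumption; the behavior mirrors the snippet.
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = bin(a)[2:]  # strip the leading "0b"
    b_binary = bin(b)[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )

assert binary_and(25, 32) == "0b000000"  # no shared bits
assert binary_and(37, 50) == "0b100000"  # 37 & 50 == 32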
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Any = DanceDiffusionPipeline __A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __A : Tuple = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } __A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __A : List[str] = False __A : str = False def UpperCamelCase( self ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) _UpperCAmelCase = IPNDMScheduler() _UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ): if str(_UpperCamelCase ).startswith('''mps''' ): _UpperCAmelCase = torch.manual_seed(_UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) _UpperCAmelCase = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def UpperCamelCase( self ): _UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase ) _UpperCAmelCase = pipe(**_UpperCamelCase ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) _UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCamelCase( self ): return super().test_save_load_local() @skip_mps def UpperCamelCase( self ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def UpperCamelCase( self ): return super().test_save_load_optional_components() @skip_mps def UpperCamelCase( self ): return super().test_attention_slicing_forward_pass() def UpperCamelCase( self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) 
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class __UpperCamelCase ( A__ ): __A : Dict = """falcon""" __A : Any = ["""past_key_values"""] def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase ) _UpperCAmelCase = hidden_size if n_embed is None else n_embed _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id _UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase = alibi _UpperCAmelCase = new_decoder_architecture _UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase = parallel_attn _UpperCAmelCase = bias super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) @property def UpperCamelCase( self ): return self.hidden_size // self.num_attention_heads @property def UpperCamelCase( self ): return not self.alibi
32
1
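# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# Two derived properties from the Falcon-style config above, restated as a tiny
# dataclass so the arithmetic is visible: the per-head dimension is
# hidden_size // num_attention_heads, and rotary position embeddings are used
# exactly when alibi is off. The class itself is an assumption; the defaults
# match the snippet.
from dataclasses import dataclass

@dataclass
class TinyFalconConfig:
    hidden_size: int = 4544
    num_attention_heads: int = 71
    alibi: bool = False

    @property
    def head_dim(self) -> int:
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self) -> bool:
        return not self.alibi

cfg = TinyFalconConfig()
assert cfg.head_dim == 64  # 4544 // 71
assert cfg.rotary          # no alibi, so rotary embeddings are used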
from __future__ import annotations def A__ ( SCREAMING_SNAKE_CASE_ : list[int] ) -> int: """simple docstring""" if not nums: return 0 _UpperCAmelCase = nums[0] _UpperCAmelCase = 0 for num in nums[1:]: _UpperCAmelCase , _UpperCAmelCase = ( max_excluding + num, max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), ) return max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod()
32
from math import sqrt def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes greater than 3 are of the form 6k +/- 1 for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int: """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = 1 while count != nth and number < 3: number += 1 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 while count != nth: number += 2 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 return number if __name__ == "__main__": print(f'''{solution() = }''')
32
1
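# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# A readable restatement of the "maximum sum of non-adjacent elements" helper
# in the row above. Two running values are tracked: the best sum that includes
# the current element and the best sum that excludes it. The obfuscated
# original collapses both into one identifier; these names are assumptions.
def max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]  # best sum with nums[0] included
    max_excluding = 0        # best sum with nums[0] excluded
    for num in nums[1:]:
        # Including `num` forces the previous element to be excluded.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)

assert max_non_adjacent_sum([1, 2, 3]) == 4      # pick 1 and 3
assert max_non_adjacent_sum([5, 1, 1, 5]) == 10  # pick the two 5s
assert max_non_adjacent_sum([-1, -2, -3]) == 0   # taking nothing is allowed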
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict: """simple docstring""" return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]: """simple docstring""" _UpperCAmelCase = create_tensor(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = gather(SCREAMING_SNAKE_CASE_ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str: """simple docstring""" _UpperCAmelCase = [state.process_index] _UpperCAmelCase = gather_object(SCREAMING_SNAKE_CASE_ ) assert len(SCREAMING_SNAKE_CASE_ ) == state.num_processes, F'''{gathered_obj}, {len(SCREAMING_SNAKE_CASE_ )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}''' def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = create_tensor(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = broadcast(SCREAMING_SNAKE_CASE_ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> Dict: """simple docstring""" if state.is_main_process: _UpperCAmelCase = torch.arange(state.num_processes + 1 ).to(state.device ) else: _UpperCAmelCase = torch.arange(state.num_processes ).to(state.device ) _UpperCAmelCase = pad_across_processes(SCREAMING_SNAKE_CASE_ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def A__ ( SCREAMING_SNAKE_CASE_ : Any ) -> Dict: """simple docstring""" if state.num_processes != 2: return _UpperCAmelCase = create_tensor(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = reduce(SCREAMING_SNAKE_CASE_ , '''sum''' ) _UpperCAmelCase = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F'''{reduced_tensor} != {truth_tensor}''' def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]: """simple docstring""" if state.num_processes != 2: return _UpperCAmelCase = create_tensor(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = reduce(SCREAMING_SNAKE_CASE_ , '''mean''' ) _UpperCAmelCase = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F'''{reduced_tensor} != {truth_tensor}''' def A__ ( SCREAMING_SNAKE_CASE_ : Any ) -> List[str]: """simple docstring""" main() def A__ ( ) -> Dict: """simple docstring""" _UpperCAmelCase = PartialState() state.print(F'''State: {state}''' ) state.print('''testing gather''' ) test_gather(SCREAMING_SNAKE_CASE_ 
) state.print('''testing gather_object''' ) test_gather_object(SCREAMING_SNAKE_CASE_ ) state.print('''testing broadcast''' ) test_broadcast(SCREAMING_SNAKE_CASE_ ) state.print('''testing pad_across_processes''' ) test_pad_across_processes(SCREAMING_SNAKE_CASE_ ) state.print('''testing reduce_sum''' ) test_reduce_sum(SCREAMING_SNAKE_CASE_ ) state.print('''testing reduce_mean''' ) test_reduce_mean(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
32
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = F'''Input value of [number={number}] must be an integer''' raise TypeError(SCREAMING_SNAKE_CASE_ ) if number < 0: return False _UpperCAmelCase = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
32
1
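# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# A readable version of the digit-comparison helper above: it tests whether a
# number is automorphic, i.e. whether its square ends in the number itself
# (5 -> 25, 76 -> 5776). Names are readability assumptions.
def is_automorphic(number: int) -> bool:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    square = number * number
    while number > 0:
        # Compare trailing digits one position at a time.
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic(76)     # 76**2 == 5776
assert not is_automorphic(7)  # 7**2 == 49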
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig UpperCAmelCase_ = { "facebook/maskformer-swin-base-ade": ( "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): __A : Optional[int] = """maskformer""" __A : List[str] = {"""hidden_size""": """mask_feature_size"""} __A : Union[str, Any] = ["""resnet""", """swin"""] __A : Union[str, Any] = ["""detr"""] def __init__( self , _UpperCamelCase = 256 , _UpperCamelCase = 256 , _UpperCamelCase = 0.1 , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0.02 , _UpperCamelCase = 1.0 , _UpperCamelCase = 1.0 , _UpperCamelCase = 1.0 , _UpperCamelCase = 20.0 , _UpperCamelCase = None , **_UpperCamelCase , ): if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k _UpperCAmelCase = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(_UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = backbone_config.pop('''model_type''' ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(_UpperCamelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
''' f'''Supported model types: {','.join(self.backbones_supported )}''' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 _UpperCAmelCase = DetrConfig() else: # verify that the decoder is supported _UpperCAmelCase = ( decoder_config.pop('''model_type''' ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f'''Transformer Decoder {decoder_type} not supported, please use one of''' f''' {','.join(self.decoders_supported )}''' ) if isinstance(_UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = CONFIG_MAPPING[decoder_type] _UpperCAmelCase = config_class.from_dict(_UpperCamelCase ) _UpperCAmelCase = backbone_config _UpperCAmelCase = decoder_config # main feature dimension for the model _UpperCAmelCase = fpn_feature_size _UpperCAmelCase = mask_feature_size # initializer _UpperCAmelCase = init_std _UpperCAmelCase = init_xavier_std # Hungarian matcher && loss _UpperCAmelCase = cross_entropy_weight _UpperCAmelCase = dice_weight _UpperCAmelCase = mask_weight _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = no_object_weight _UpperCAmelCase = output_auxiliary_logits _UpperCAmelCase = self.decoder_config.encoder_attention_heads _UpperCAmelCase = self.decoder_config.num_hidden_layers super().__init__(**_UpperCamelCase ) @classmethod def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): return cls( backbone_config=_UpperCamelCase , decoder_config=_UpperCamelCase , **_UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.decoder_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
32
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=A__ ) class __UpperCamelCase ( A__ ): __A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __A : ClassVar[Features] = Features({"""text""": Value("""string""" )} ) __A : ClassVar[Features] = Features({} ) __A : str = "text" @property def UpperCamelCase( self ): return {self.text_column: "text"}
32
1
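# --- Editor's illustrative sketch (added for clarity; not an original dataset row) ---
# The language-modeling task template in the row above exposes a column mapping
# that renames a dataset's text column to the canonical "text" name. A
# dependency-free restatement of that pattern (class and property names are
# assumptions):
from dataclasses import dataclass, field

@dataclass(frozen=True)
class LanguageModelingTemplate:
    task: str = field(default="language-modeling")
    text_column: str = "text"

    @property
    def column_mapping(self) -> dict:
        return {self.text_column: "text"}

template = LanguageModelingTemplate(text_column="content")
assert template.column_mapping == {"content": "text"}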
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"vocab_file": "spiece.model"} UpperCAmelCase_ = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 UpperCAmelCase_ = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } UpperCAmelCase_ = "▁" class __UpperCamelCase ( A__ ): __A : Any = VOCAB_FILES_NAMES __A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Tuple = ["""input_ids""", """attention_mask"""] def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: _UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) if legacy: logger.warning_once( f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' ) _UpperCAmelCase = legacy _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , ) _UpperCAmelCase = vocab_file _UpperCAmelCase = extra_ids _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , ) return max_model_length @property def UpperCamelCase( self ): return self.sp_model.get_piece_size() + self._extra_ids def UpperCamelCase( self ): _UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_UpperCamelCase )) + [1] return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def UpperCamelCase( self ): return list( set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase( self ): return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()] def UpperCamelCase( self , _UpperCamelCase ): if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) if token_ids_a is None: return token_ids_a else: _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None return state def __setstate__( self , _UpperCamelCase ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: _UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' ) return super().tokenize(_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): if not self.legacy: _UpperCAmelCase = text.startswith(_UpperCamelCase ) if is_first: _UpperCAmelCase = text[1:] _UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ): _UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def UpperCamelCase( self , _UpperCamelCase ): if token.startswith('''<extra_id_''' ): _UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase ) _UpperCAmelCase = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): if index < self.sp_model.get_piece_size(): _UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase ) else: _UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>''' return token def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = [] _UpperCAmelCase = '''''' _UpperCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token _UpperCAmelCase = True _UpperCAmelCase = [] else: current_sub_tokens.append(_UpperCamelCase ) _UpperCAmelCase = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: _UpperCAmelCase = 
self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
32
1
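# Illustrative sketch (not part of the corpus row above): the sentinel-token id
# scheme used by the T5-style tokenizer fragment above. Sentinels occupy the top
# of the vocabulary, so <extra_id_k> maps to vocab_size - k - 1, mirroring the
# token/id round trip in that tokenizer. The vocab size below is an assumed,
# illustrative value, not taken from the source.
import re

VOCAB_SIZE = 32100  # assumption: 32000 SentencePiece pieces + 100 extra ids


def sentinel_to_id(token: str) -> int:
    match = re.fullmatch(r"<extra_id_(\d+)>", token)
    if match is None:
        raise ValueError(f"{token!r} is not a sentinel token")
    return VOCAB_SIZE - int(match.group(1)) - 1


def id_to_sentinel(index: int) -> str:
    return f"<extra_id_{VOCAB_SIZE - 1 - index}>"


assert sentinel_to_id("<extra_id_0>") == 32099
assert id_to_sentinel(32099) == "<extra_id_0>"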
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
32
from __future__ import annotations


def is_9_pandigital(number: int) -> bool:
    """Return True if `number` uses each of the digits 1-9 exactly once."""
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    A 4-digit base n concatenated with 2n equals n * 100002; a 3-digit base n
    concatenated with 2n and 3n equals n * 1002003, so it suffices to scan
    those two ranges from the top down.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
32
1
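# Illustrative sketch: the generic concatenated-product construction behind the
# Project Euler 38 solution above. Concatenating a 4-digit n with 2n multiplies
# n by 100002; concatenating a 3-digit n with 2n and 3n multiplies it by 1002003.
def concatenated_product(n: int, parts: int) -> int:
    return int("".join(str(n * k) for k in range(1, parts + 1)))


assert concatenated_product(9, 5) == 918273645          # the classic 9 x (1,...,5)
assert concatenated_product(9327, 2) == 9327 * 100002   # 4-digit base, two parts
assert concatenated_product(327, 3) == 327 * 1002003    # 3-digit base, three parts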
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCAmelCase_ = logging.getLogger(__name__) @dataclass class __UpperCamelCase : __A : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __A : Optional[str] = field( default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __A : Optional[str] = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) __A : Optional[str] = field( default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __A : bool = field(default=A__ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __A : Optional[str] = field( default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __UpperCamelCase : __A : str = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) __A : Optional[str] = field( default=A__ , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) __A : int = field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __A : bool = field( default=A__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def A__ ( ) -> List[str]: """simple docstring""" _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ''' --overwrite_output_dir to overcome.''' ) _UpperCAmelCase = import_module('''tasks''' ) try: _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , model_args.task_type ) _UpperCAmelCase = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE_ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task _UpperCAmelCase = token_classification_task.get_labels(data_args.labels ) _UpperCAmelCase = dict(enumerate(SCREAMING_SNAKE_CASE_ ) ) _UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE_ )} , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) _UpperCAmelCase = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , ) # Get datasets _UpperCAmelCase = ( TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) _UpperCAmelCase = ( TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ) -> Tuple[List[int], List[int]]: _UpperCAmelCase = np.argmax(SCREAMING_SNAKE_CASE_ , axis=2 ) _UpperCAmelCase , _UpperCAmelCase = preds.shape _UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE_ )] _UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE_ )] for i in range(SCREAMING_SNAKE_CASE_ ): for j in range(SCREAMING_SNAKE_CASE_ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(SCREAMING_SNAKE_CASE_ : EvalPrediction ) -> Dict: _UpperCAmelCase , 
_UpperCAmelCase = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "precision": precision_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "recall": recall_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "f1": fa_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), } # Data collator _UpperCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _UpperCAmelCase = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCAmelCase = trainer.evaluate() _UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(SCREAMING_SNAKE_CASE_ ) # Predict if training_args.do_predict: _UpperCAmelCase = TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE_ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = trainer.predict(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase , _UpperCAmelCase = align_predictions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = os.path.join(training_args.output_dir , '''test_results.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer: for key, value in metrics.items(): logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) writer.write('''%s = %s\n''' % (key, value) ) # Save predictions _UpperCAmelCase = os.path.join(training_args.output_dir , '''test_predictions.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer: with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f: token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return results def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> Any: """simple docstring""" main() if __name__ == "__main__": main()
32
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Element-wise ELU activation: x for x > 0, otherwise alpha * (exp(x) - 1)."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
1
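# Illustrative sketch of the prediction-alignment step in the token
# classification script above: argmax over the logits, then drop positions
# whose gold label equals the CrossEntropyLoss ignore_index (-100) so that
# predictions and labels stay in step. The label map below is a toy example.
import numpy as np

IGNORE_INDEX = -100  # nn.CrossEntropyLoss().ignore_index
label_map = {0: "O", 1: "B-PER"}

logits = np.array([[[2.0, 0.1], [0.2, 3.0], [1.0, 0.0]]])  # (batch=1, seq=3, labels=2)
label_ids = np.array([[0, 1, IGNORE_INDEX]])               # last position is padding

preds = np.argmax(logits, axis=2)
preds_list = [
    [label_map[p] for p, label in zip(pred_row, label_row) if label != IGNORE_INDEX]
    for pred_row, label_row in zip(preds, label_ids)
]
labels_list = [[label_map[label] for label in row if label != IGNORE_INDEX] for row in label_ids]

assert preds_list == [["O", "B-PER"]] and labels_list == [["O", "B-PER"]]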
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
32
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-",
    "!": "-.-.--",  # Exclamation mark is not in ITU-R recommendation
    " ": "/",
}
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a message into Morse code, tokens separated by spaces."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate space-separated Morse tokens back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
32
1
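# Illustrative, self-contained round trip of the Morse scheme above, using a
# tiny subset of the full dictionary.
MORSE_SUBSET = {"S": "...", "O": "---"}
REVERSE_SUBSET = {value: key for key, value in MORSE_SUBSET.items()}

message = "SOS"
encoded = " ".join(MORSE_SUBSET[char] for char in message)
assert encoded == "... --- ..."
assert "".join(REVERSE_SUBSET[token] for token in encoded.split()) == message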
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> Any: # picklable for multiprocessing """simple docstring""" return x.sum() def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]: # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class __UpperCamelCase : __A : int __A : str class __UpperCamelCase ( A__ ): def UpperCamelCase( self ): _UpperCAmelCase = {} _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = [1, 2] _UpperCAmelCase = {'''a''': 1, '''b''': 2} _UpperCAmelCase = {'''a''': [1, 2], '''b''': [3, 4]} _UpperCAmelCase = {'''a''': {'''1''': 1}, '''b''': 2} _UpperCAmelCase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} _UpperCAmelCase = {} _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = [2, 3] _UpperCAmelCase = {'''a''': 2, '''b''': 3} _UpperCAmelCase = {'''a''': [2, 3], '''b''': [4, 5]} _UpperCAmelCase = {'''a''': {'''1''': 2}, '''b''': 3} _UpperCAmelCase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) _UpperCAmelCase = 2 self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) _UpperCAmelCase = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )} _UpperCAmelCase = {'''a''': 2, '''b''': 0, '''c''': 2} _UpperCAmelCase = { '''a''': np.eye(2 ).astype(_UpperCamelCase ), '''b''': np.zeros(3 ).astype(_UpperCamelCase ), '''c''': np.ones(2 ).astype(_UpperCamelCase ), } self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) 
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase , num_proc=_UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(_UpperCamelCase ): # can't pickle a local lambda map_nested(lambda _UpperCamelCase : x + 1 , _UpperCamelCase , num_proc=_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = {'''a''': 1, '''b''': 2} _UpperCAmelCase = {'''a''': 3, '''b''': 4} _UpperCAmelCase = {'''a''': 5, '''b''': 6} _UpperCAmelCase = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ) , _UpperCamelCase ) def UpperCamelCase( self ): class __UpperCamelCase : __A : int = """bar""" _UpperCAmelCase = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(_UpperCamelCase , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple: """simple docstring""" with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool: _UpperCAmelCase = {F'''{i}''': i for i in range(SCREAMING_SNAKE_CASE_ )} _UpperCAmelCase = map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 10 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class __UpperCamelCase ( A__ ): @require_tf def UpperCamelCase( self ): import tensorflow as tf from tensorflow.keras import layers _UpperCAmelCase = layers.Dense(2 ) def gen_random_output(): _UpperCAmelCase = tf.random.uniform((1, 3) ) return model(_UpperCamelCase ).numpy() with temp_seed(42 , set_tensorflow=_UpperCamelCase ): _UpperCAmelCase = gen_random_output() with temp_seed(42 , set_tensorflow=_UpperCamelCase ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(_UpperCamelCase , _UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def UpperCamelCase( self ): import torch def gen_random_output(): _UpperCAmelCase = torch.nn.Linear(3 , 2 ) _UpperCAmelCase = torch.rand(1 , 3 ) return model(_UpperCamelCase ).detach().numpy() with temp_seed(42 , set_pytorch=_UpperCamelCase ): _UpperCAmelCase = gen_random_output() with temp_seed(42 , set_pytorch=_UpperCamelCase ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(_UpperCamelCase , _UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def UpperCamelCase( self ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): _UpperCAmelCase = gen_random_output() with temp_seed(42 ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = 
gen_random_output() np.testing.assert_equal(_UpperCamelCase , _UpperCamelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''' , [{}] ) def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> Dict: """simple docstring""" _UpperCAmelCase = NestedDataStructure(SCREAMING_SNAKE_CASE_ ).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''' , [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ] , ) def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = NestedDataStructure(SCREAMING_SNAKE_CASE_ ).flatten() assert output == expected_output def A__ ( ) -> int: """simple docstring""" _UpperCAmelCase = A(x=1 , y='''foobar''' ) _UpperCAmelCase = {'''x''': 1, '''y''': '''foobar'''} assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output _UpperCAmelCase = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]} _UpperCAmelCase = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output with pytest.raises(SCREAMING_SNAKE_CASE_ ): asdict([1, A(x=10 , y='''foo''' )] ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> int: """simple docstring""" return text.split() def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def A__ ( ) -> str: """simple docstring""" with Pool(2 ) as pool: _UpperCAmelCase = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(SCREAMING_SNAKE_CASE_ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _UpperCAmelCase = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(SCREAMING_SNAKE_CASE_ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _UpperCAmelCase = [] for yield_time, content in iflatmap_unordered( SCREAMING_SNAKE_CASE_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(SCREAMING_SNAKE_CASE_ ) assert out.count('''a''' ) == 2 assert out.count('''b''' ) == 2 assert len(SCREAMING_SNAKE_CASE_ ) == 4
32
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Any = DanceDiffusionPipeline __A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __A : Tuple = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } __A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __A : List[str] = False __A : str = False def UpperCamelCase( self ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) _UpperCAmelCase = IPNDMScheduler() _UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ): if str(_UpperCamelCase ).startswith('''mps''' ): _UpperCAmelCase = torch.manual_seed(_UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) _UpperCAmelCase = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def UpperCamelCase( self ): _UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase ) _UpperCAmelCase = pipe(**_UpperCamelCase ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) _UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCamelCase( self ): return super().test_save_load_local() @skip_mps def UpperCamelCase( self ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def UpperCamelCase( self ): return super().test_save_load_optional_components() @skip_mps def UpperCamelCase( self ): return super().test_attention_slicing_forward_pass() def UpperCamelCase( self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) 
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
32
1
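# Illustrative sketch of the map_nested behavior exercised by the py_utils
# tests above: apply a function to every leaf of a nested dict/list/tuple
# structure while preserving its shape. A simplified stand-in, not the
# datasets implementation.
def map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {key: map_nested_sketch(fn, value) for key, value in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_sketch(fn, value) for value in data)
    return fn(data)


assert map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}) == {"a": [2, 3], "b": {"c": 4}}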
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): _UpperCAmelCase = '''ylacombe/bark-small''' _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = '''en_speaker_1''' _UpperCAmelCase = '''This is a test string''' _UpperCAmelCase = '''speaker_embeddings_path.json''' _UpperCAmelCase = '''speaker_embeddings''' def UpperCamelCase( self , **_UpperCamelCase ): return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCamelCase ) def UpperCamelCase( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BarkProcessor(tokenizer=_UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase( self ): _UpperCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase( self ): _UpperCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _UpperCAmelCase = 35 _UpperCAmelCase = 2 _UpperCAmelCase = 8 _UpperCAmelCase = { '''semantic_prompt''': np.ones(_UpperCamelCase ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _UpperCAmelCase = processor(text=self.input_string , voice_preset=_UpperCamelCase ) _UpperCAmelCase = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _UpperCAmelCase = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(_UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = processor(text=self.input_string , voice_preset=_UpperCamelCase ) _UpperCAmelCase = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _UpperCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BarkProcessor(tokenizer=_UpperCamelCase ) _UpperCAmelCase = processor(text=self.input_string ) _UpperCAmelCase = tokenizer( self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , 
encoded_processor[key].squeeze().tolist() )
32
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ ("vision-encoder-decoder", 
"FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A 
: Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
32
1
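# Illustrative sketch of the voice-preset round trip covered by the Bark
# processor tests above: persist prompt arrays to an .npz file and read them
# back unchanged. Shapes are toy values.
import os
import tempfile

import numpy as np

voice_preset = {"semantic_prompt": np.ones(35), "coarse_prompt": np.ones((2, 35))}
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **voice_preset)
loaded = np.load(path)
for key in voice_preset:
    assert np.array_equal(loaded[key], voice_preset[key])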
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
32
import base64


def base32_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base32, returning a bytes-like object."""
    return base64.b32encode(string.encode("utf-8"))


def base32_decode(encoded_bytes: bytes) -> str:
    """Decode Base32 bytes back to a UTF-8 string."""
    return base64.b32decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
    print(decoded)
32
1
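# Illustrative check for the Base32 helpers above, standard library only.
import base64

assert base64.b32encode(b"Hello World!") == b"JBSWY3DPEBLW64TMMQQQ===="
assert base64.b32decode(b"JBSWY3DPEBLW64TMMQQQ====") == b"Hello World!"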
import numpy as np import datasets UpperCAmelCase_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n" UpperCAmelCase_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n" UpperCAmelCase_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def UpperCamelCase( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ), } ) , ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): # convert to numpy arrays _UpperCAmelCase = np.array(_UpperCamelCase ) _UpperCAmelCase = np.array(_UpperCamelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction _UpperCAmelCase = X - np.mean(_UpperCamelCase ) _UpperCAmelCase = np.cov(reference_distribution.T ) try: _UpperCAmelCase = np.linalg.inv(_UpperCamelCase ) except np.linalg.LinAlgError: _UpperCAmelCase = np.linalg.pinv(_UpperCamelCase ) _UpperCAmelCase = np.dot(_UpperCamelCase , _UpperCamelCase ) _UpperCAmelCase = np.dot(_UpperCamelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
32
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): __A : int = ["""pixel_values"""] def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): super().__init__(**_UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' ) _UpperCAmelCase = do_resize _UpperCAmelCase = do_rescale _UpperCAmelCase = do_normalize _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if "shortest_edge" in size: _UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ): return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if not is_batched(_UpperCamelCase ): _UpperCAmelCase = [images] if not valid_images(_UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
32
1
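# Illustrative numeric check of the Mahalanobis metric above. With the
# two-point reference distribution [[0, 1], [1, 0]], the covariance matrix is
# singular, so the pseudo-inverse branch is taken, and X = [[0, 1]] yields a
# distance of 0.5, matching the metric's docstring example.
import numpy as np

X = np.array([[0, 1]])
reference_distribution = np.array([[0, 1], [1, 0]])

X_minus_mu = X - np.mean(reference_distribution)
inv_covmat = np.linalg.pinv(np.cov(reference_distribution.T))
mahal_dist = np.dot(np.dot(X_minus_mu, inv_covmat), X_minus_mu.T).diagonal()

assert np.allclose(mahal_dist, [0.5])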
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): __A : str = ["""input_features"""] def __init__( self , _UpperCamelCase=80 , _UpperCamelCase=16000 , _UpperCamelCase=160 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=0.0 , _UpperCamelCase=False , **_UpperCamelCase , ): super().__init__( feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , ) _UpperCAmelCase = n_fft _UpperCAmelCase = hop_length _UpperCAmelCase = chunk_length _UpperCAmelCase = chunk_length * sampling_rate _UpperCAmelCase = self.n_samples // hop_length _UpperCAmelCase = sampling_rate _UpperCAmelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_UpperCamelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_UpperCamelCase , norm='''slaney''' , mel_scale='''slaney''' , ) def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = spectrogram( _UpperCamelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) _UpperCAmelCase = log_spec[:, :-1] _UpperCAmelCase = np.maximum(_UpperCamelCase , log_spec.max() - 8.0 ) _UpperCAmelCase = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.0 ): if attention_mask is not None: _UpperCAmelCase = np.array(_UpperCamelCase , np.intaa ) _UpperCAmelCase = [] for vector, length in zip(_UpperCamelCase , attention_mask.sum(-1 ) ): _UpperCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: _UpperCAmelCase = padding_value normed_input_values.append(_UpperCamelCase ) else: _UpperCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "max_length" , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) _UpperCAmelCase = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _UpperCAmelCase = is_batched_numpy or ( isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ): _UpperCAmelCase = np.asarray(_UpperCamelCase , dtype=np.floataa ) elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _UpperCAmelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _UpperCAmelCase = [np.asarray([raw_speech] ).T] _UpperCAmelCase = BatchFeature({'''input_features''': raw_speech} ) # convert into correct format for padding _UpperCAmelCase = self.pad( _UpperCamelCase , padding=_UpperCamelCase , max_length=max_length if max_length else self.n_samples , truncation=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: _UpperCAmelCase = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) _UpperCAmelCase = np.stack(padded_inputs['''input_features'''] , axis=0 ) # make sure list is in array format _UpperCAmelCase = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 ) _UpperCAmelCase = [self._np_extract_fbank_features(_UpperCamelCase ) for waveform in input_features[0]] if isinstance(input_features[0] , _UpperCamelCase ): _UpperCAmelCase = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in input_features] else: _UpperCAmelCase = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) _UpperCAmelCase = padded_inputs['''attention_mask'''][:, :: self.hop_length] if return_tensors is not None: _UpperCAmelCase = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs def UpperCamelCase( self ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
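The extractor above pads or truncates raw audio to fixed 30-second windows before computing log-mel features. A hedged end-to-end sketch with dummy input and default hyperparameters:

import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()  # defaults: 80 mel bins, 16 kHz, 30 s chunks
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence (dummy input)
features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000): padded to 30 s, 10 ms hops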
32
from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=A__ ): __A : str = ["""torch""", """scipy"""] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(cls , ['''torch''', '''scipy'''] )
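These placeholders let `from transformers import ...` succeed even when optional backends are absent; the failure is deferred to first use. A small sketch of the same pattern (the class name is invented for illustration):

from transformers.utils import DummyObject, requires_backends

class NeedsTorchAndScipy(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

try:
    NeedsTorchAndScipy()  # raises only if torch or scipy is not installed
except ImportError as err:
    print(err)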
32
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", } class __UpperCamelCase ( A__ ): __A : int = """funnel""" __A : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", } def __init__( self , _UpperCamelCase=30522 , _UpperCamelCase=[4, 4, 4] , _UpperCamelCase=None , _UpperCamelCase=2 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=64 , _UpperCamelCase=3072 , _UpperCamelCase="gelu_new" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=None , _UpperCamelCase=1e-9 , _UpperCamelCase="mean" , _UpperCamelCase="relative_shift" , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = block_sizes _UpperCAmelCase = [1] * len(_UpperCamelCase ) if block_repeats is None else block_repeats assert len(_UpperCamelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." _UpperCAmelCase = num_decoder_layers _UpperCAmelCase = d_model _UpperCAmelCase = n_head _UpperCAmelCase = d_head _UpperCAmelCase = d_inner _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = initializer_range _UpperCAmelCase = initializer_std _UpperCAmelCase = layer_norm_eps assert pooling_type in [ "mean", "max", ], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' _UpperCAmelCase = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' _UpperCAmelCase = attention_type _UpperCAmelCase = separate_cls _UpperCAmelCase = truncate_seq _UpperCAmelCase = pool_q_only super().__init__(**_UpperCamelCase ) @property def UpperCamelCase( self ): return sum(self.block_sizes ) @num_hidden_layers.setter def UpperCamelCase( self , _UpperCamelCase ): raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. 
Please set `block_sizes`.''' ) @property def UpperCamelCase( self ): return len(self.block_sizes ) @num_blocks.setter def UpperCamelCase( self , _UpperCamelCase ): raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
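A short sketch of the derived properties defined above (the block sizes shown are the library defaults, written out explicitly for clarity):

from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4])
print(config.num_blocks)         # 3  == len(block_sizes)
print(config.num_hidden_layers)  # 12 == sum(block_sizes)
try:
    config.num_hidden_layers = 6  # the setter above deliberately refuses this
except NotImplementedError as err:
    print(err)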
32
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int: """simple docstring""" _UpperCAmelCase = [0 for i in range(n + 1 )] _UpperCAmelCase = 1 _UpperCAmelCase = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = 1 _UpperCAmelCase = 0 for i in range(SCREAMING_SNAKE_CASE_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f'''{solution() = }''')
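A de-obfuscated cross-check of the sieve logic above, verified against the hand-countable case and the published Project Euler 10 total (a standalone sketch, not part of the original module):

def sum_primes_below(n: int) -> int:
    is_composite = [False] * (n + 1)  # mirrors the 0/1 primality list above
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n + 1, i):
                is_composite[j] = True
    return total

assert sum_primes_below(10) == 17                      # 2 + 3 + 5 + 7
assert sum_primes_below(2_000_000) == 142_913_828_922  # known Euler 10 answer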
32
1
from math import isqrt def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> list[int]: """simple docstring""" _UpperCAmelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = False return [i for i in range(2 , SCREAMING_SNAKE_CASE_ ) if is_prime[i]] def A__ ( SCREAMING_SNAKE_CASE_ : int = 10**8 ) -> int: """simple docstring""" _UpperCAmelCase = calculate_prime_numbers(max_number // 2 ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f'''{solution() = }''')
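The two-pointer loop above counts products p * q < limit with primes p <= q, i.e. semiprimes. A brute-force cross-check at a small, hand-verifiable bound (a standalone sketch):

def is_prime(k: int) -> bool:
    if k < 2:
        return False
    f = 2
    while f * f <= k:
        if k % f == 0:
            return False
        f += 1
    return True

def count_semiprimes_below(limit: int) -> int:
    primes = [p for p in range(2, limit // 2 + 1) if is_prime(p)]
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i:]:
            if p * q >= limit:
                break
            count += 1
    return count

print(count_semiprimes_below(30))  # 10: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26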
32
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , _UpperCamelCase , ) super().__init__(*_UpperCamelCase , **_UpperCamelCase )
32
1
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = "RegNetConfig" # Base docstring UpperCAmelCase_ = "facebook/regnet-y-040" UpperCAmelCase_ = [1, 10_88, 7, 7] # Image classification docstring UpperCAmelCase_ = "facebook/regnet-y-040" UpperCAmelCase_ = "tabby, tabby cat" UpperCAmelCase_ = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 3 , _UpperCamelCase = 1 , _UpperCamelCase = 1 , _UpperCamelCase = "relu" , ): super().__init__() _UpperCAmelCase = nn.Convad( _UpperCamelCase , _UpperCamelCase , kernel_size=_UpperCamelCase , stride=_UpperCamelCase , padding=kernel_size // 2 , groups=_UpperCamelCase , bias=_UpperCamelCase , ) _UpperCAmelCase = nn.BatchNormad(_UpperCamelCase ) _UpperCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = self.convolution(_UpperCamelCase ) _UpperCAmelCase = self.normalization(_UpperCamelCase ) _UpperCAmelCase = self.activation(_UpperCamelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase ): super().__init__() _UpperCAmelCase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) _UpperCAmelCase = config.num_channels def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _UpperCAmelCase = self.embedder(_UpperCamelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 2 ): super().__init__() _UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , stride=_UpperCamelCase , bias=_UpperCamelCase ) _UpperCAmelCase = nn.BatchNormad(_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = self.convolution(_UpperCamelCase ) _UpperCAmelCase = self.normalization(_UpperCamelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase ): super().__init__() _UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) _UpperCAmelCase = nn.Sequential( nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1 ) , nn.Sigmoid() , ) def UpperCamelCase( self , _UpperCamelCase ): # b c h w -> b c 1 1 _UpperCAmelCase = self.pooler(_UpperCamelCase ) _UpperCAmelCase = self.attention(_UpperCamelCase ) _UpperCAmelCase = hidden_state * attention return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , 
_UpperCamelCase = 1 ): super().__init__() _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = max(1 , out_channels // config.groups_width ) _UpperCAmelCase = ( RegNetShortCut(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase = nn.Sequential( RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase ) , ) _UpperCAmelCase = ACTaFN[config.hidden_act] def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = hidden_state _UpperCAmelCase = self.layer(_UpperCamelCase ) _UpperCAmelCase = self.shortcut(_UpperCamelCase ) hidden_state += residual _UpperCAmelCase = self.activation(_UpperCamelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 ): super().__init__() _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = max(1 , out_channels // config.groups_width ) _UpperCAmelCase = ( RegNetShortCut(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase = nn.Sequential( RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase ) , ) _UpperCAmelCase = ACTaFN[config.hidden_act] def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = hidden_state _UpperCAmelCase = self.layer(_UpperCamelCase ) _UpperCAmelCase = self.shortcut(_UpperCamelCase ) hidden_state += residual _UpperCAmelCase = self.activation(_UpperCamelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 2 , _UpperCamelCase = 2 , ): super().__init__() _UpperCAmelCase = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer _UpperCAmelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , ) , *[layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for _ in range(depth - 1 )] , ) def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = self.layers(_UpperCamelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase ): super().__init__() _UpperCAmelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCamelCase , config.depths[1:] ): self.stages.append(RegNetStage(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , depth=_UpperCamelCase ) ) def UpperCamelCase( self , 
_UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True ): _UpperCAmelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) _UpperCAmelCase = stage_module(_UpperCamelCase ) if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase , hidden_states=_UpperCamelCase ) class __UpperCamelCase ( A__ ): __A : List[str] = RegNetConfig __A : Any = """regnet""" __A : Optional[int] = """pixel_values""" __A : Any = True def UpperCamelCase( self , _UpperCamelCase ): if isinstance(_UpperCamelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(_UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False ): if isinstance(_UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = value UpperCAmelCase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCAmelCase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( """The bare RegNet model outputting raw features without any specific head on top.""" , A__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __UpperCamelCase ( A__ ): def __init__( self , _UpperCamelCase ): super().__init__(_UpperCamelCase ) _UpperCAmelCase = config _UpperCAmelCase = RegNetEmbeddings(_UpperCamelCase ) _UpperCAmelCase = RegNetEncoder(_UpperCamelCase ) _UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ): _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.embedder(_UpperCamelCase ) _UpperCAmelCase = self.encoder( _UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase ) _UpperCAmelCase = encoder_outputs[0] _UpperCAmelCase = self.pooler(_UpperCamelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCamelCase , pooler_output=_UpperCamelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , A__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __UpperCamelCase ( A__ ): def __init__( self , _UpperCamelCase ): super().__init__(_UpperCamelCase ) _UpperCAmelCase = config.num_labels _UpperCAmelCase = RegNetModel(_UpperCamelCase ) # classification head _UpperCAmelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , ): _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.regnet(_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase ) _UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCAmelCase = self.classifier(_UpperCamelCase ) _UpperCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCAmelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCAmelCase = '''single_label_classification''' else: _UpperCAmelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCAmelCase = MSELoss() if self.num_labels == 1: _UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCAmelCase = loss_fct(_UpperCamelCase , _UpperCamelCase ) elif self.config.problem_type == "single_label_classification": _UpperCAmelCase = CrossEntropyLoss() _UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCAmelCase = BCEWithLogitsLoss() _UpperCAmelCase = loss_fct(_UpperCamelCase , _UpperCamelCase ) if not return_dict: _UpperCAmelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCamelCase , logits=_UpperCamelCase , hidden_states=outputs.hidden_states )
32
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( A__ ): __A : Dict = ["""image_processor""", """tokenizer"""] __A : List[str] = """BridgeTowerImageProcessor""" __A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , _UpperCamelCase , _UpperCamelCase ): super().__init__(_UpperCamelCase , _UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = self.tokenizer( text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , ) # add pixel_values + pixel_mask _UpperCAmelCase = self.image_processor( _UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase ) encoding.update(_UpperCamelCase ) return encoding def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase ) @property def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer.model_input_names _UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
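A hedged sketch of the combined tokenizer-plus-image-processor call above; the checkpoint is the public BridgeTower base model, and its availability is an assumption:

import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
encoding = processor(image, "two cats sleeping on a couch", return_tensors="pt")
print(sorted(encoding.keys()))  # tokenizer fields plus pixel_values (and a pixel mask when padding)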
32
1
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _UpperCAmelCase = 
model_type_to_module_name(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' ) try: return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(SCREAMING_SNAKE_CASE_ , '''__name__''' , SCREAMING_SNAKE_CASE_ ) == class_name: return extractor # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _UpperCAmelCase = importlib.import_module('''transformers''' ) if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return None def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = get_file_from_repo( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , ) if resolved_config_file is None: logger.info( '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' ) return {} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as reader: return json.load(SCREAMING_SNAKE_CASE_ ) class __UpperCamelCase : def __init__( self ): raise EnvironmentError( '''AutoFeatureExtractor is designed to be instantiated ''' '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(_UpperCamelCase ) def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ): _UpperCAmelCase = kwargs.pop('''config''' , _UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''trust_remote_code''' , _UpperCamelCase ) _UpperCAmelCase = True _UpperCAmelCase , _UpperCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(_UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = config_dict.get('''feature_extractor_type''' , _UpperCamelCase ) _UpperCAmelCase = None if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): _UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor'''] # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) # It could be in `config.feature_extractor_type` _UpperCAmelCase = getattr(_UpperCamelCase , '''feature_extractor_type''' , _UpperCamelCase ) if hasattr(_UpperCamelCase , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map: _UpperCAmelCase = config.auto_map['''AutoFeatureExtractor'''] if feature_extractor_class is not None: _UpperCAmelCase = feature_extractor_class_from_name(_UpperCamelCase ) _UpperCAmelCase = feature_extractor_auto_map is not None _UpperCAmelCase = feature_extractor_class is not None or type(_UpperCamelCase ) in FEATURE_EXTRACTOR_MAPPING _UpperCAmelCase = resolve_trust_remote_code( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if has_remote_code and trust_remote_code: _UpperCAmelCase = get_class_from_dynamic_module( _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''code_revision''' , _UpperCamelCase ) if os.path.isdir(_UpperCamelCase ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(_UpperCamelCase ) in FEATURE_EXTRACTOR_MAPPING: _UpperCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(_UpperCamelCase )] return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) raise ValueError( f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): FEATURE_EXTRACTOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase )
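The resolution order implemented above, in one concrete call: the preprocessor config is consulted first, then the FEATURE_EXTRACTOR_MAPPING fallback keyed by config type (the checkpoint name is an assumption):

from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor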
32
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
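The `_LazyModule` indirection above keeps the package import cheap: names are declared up front, and the heavy submodules are executed only on first attribute access. A tiny observational sketch, assuming transformers is installed:

import transformers.models.xlm as xlm

# The attribute access below, not the import above, loads tokenization_xlm:
print(xlm.XLMTokenizer.__name__)  # XLMTokenizer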
32
1
from __future__ import annotations def A__ ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> list: """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase , _UpperCAmelCase = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) _UpperCAmelCase = result + left + right return input_list def A__ ( SCREAMING_SNAKE_CASE_ : list ) -> list: """simple docstring""" if len(SCREAMING_SNAKE_CASE_ ) <= 1: return input_list _UpperCAmelCase = list(SCREAMING_SNAKE_CASE_ ) # iteration for two-way merging _UpperCAmelCase = 2 while p <= len(SCREAMING_SNAKE_CASE_ ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = i _UpperCAmelCase = i + p - 1 _UpperCAmelCase = (low + high + 1) // 2 _UpperCAmelCase = merge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # final merge of last two parts if p * 2 >= len(SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = i _UpperCAmelCase = merge(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": UpperCAmelCase_ = input("Enter numbers separated by a comma:\n").strip() if user_input == "": UpperCAmelCase_ = [] else: UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(",")] print(iter_merge_sort(unsorted))
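A de-obfuscated restatement of the bottom-up strategy above, with a quick agreement check against sorted() (a standalone sketch, not the original function):

import random

def bottom_up_merge_sort(items: list) -> list:
    items = list(items)
    width = 1  # current run length; doubles each pass
    while width < len(items):
        for low in range(0, len(items), 2 * width):
            mid = min(low + width, len(items))
            high = min(low + 2 * width, len(items))
            left, right = items[low:mid], items[mid:high]
            merged = []
            while left and right:
                merged.append((left if left[0] <= right[0] else right).pop(0))
            items[low:high] = merged + left + right
        width *= 2
    return items

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert bottom_up_merge_sort(data) == sorted(data)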
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __UpperCamelCase ( A__ ): __A : Any = """biogpt""" def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = scale_embedding _UpperCAmelCase = use_cache _UpperCAmelCase = layerdrop _UpperCAmelCase = activation_dropout super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
32
1
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , _UpperCamelCase ): _UpperCAmelCase = parent def UpperCamelCase( self ): return {} def A__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = '''<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>''' _UpperCAmelCase = ''' <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> ''' return [html_string_a, html_string_a] @require_bsa class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None def UpperCamelCase( self ): _UpperCAmelCase = MarkupLMFeatureExtractionTester(self ) @property def UpperCamelCase( self ): return self.feature_extract_tester.prepare_feat_extract_dict() def UpperCamelCase( self ): # Initialize feature_extractor _UpperCAmelCase = self.feature_extraction_class() # Test not batched input _UpperCAmelCase = get_html_strings()[0] _UpperCAmelCase = feature_extractor(_UpperCamelCase ) # fmt: off _UpperCAmelCase = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']] _UpperCAmelCase = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']] # fmt: on self.assertEqual(encoding.nodes , _UpperCamelCase ) self.assertEqual(encoding.xpaths , _UpperCamelCase ) # Test batched _UpperCAmelCase = get_html_strings() _UpperCAmelCase = feature_extractor(_UpperCamelCase ) # fmt: off _UpperCAmelCase = expected_nodes + [['''My First Heading''', '''My first paragraph.''']] _UpperCAmelCase = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , _UpperCamelCase ) self.assertEqual(encoding.xpaths , _UpperCamelCase )
32
from typing import List from .keymap import KEYMAP, get_character def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += [key] setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : Any ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += keys setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator class __UpperCamelCase ( A__ ): def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if not hasattr(_UpperCamelCase , '''key_handler''' ): setattr(_UpperCamelCase , '''key_handler''' , {} ) setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): _UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] ) for key in handled_keys: _UpperCAmelCase = value return new_cls @staticmethod def UpperCamelCase( cls ): _UpperCAmelCase = get_character() if char != KEYMAP["undefined"]: _UpperCAmelCase = ord(_UpperCamelCase ) _UpperCAmelCase = cls.key_handler.get(_UpperCamelCase ) if handler: _UpperCAmelCase = char return handler(cls ) else: return None def A__ ( cls : Union[str, Any] ) -> Any: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
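A hedged usage sketch for the decorator/metaclass pair above. Upstream (accelerate's menu helpers) names the two `A__` decorators `mark` and `mark_multiple`; the demo class below is invented:

class DemoMenu(metaclass=KeyHandler):  # assumes mark/mark_multiple are in scope
    @mark(KEYMAP["up"])
    def move_up(cls):
        return "moved up"

    @mark_multiple(KEYMAP["down"], KEYMAP["right"])
    def other(cls):
        return "other"

# The metaclass folded every handle_key tag into one dispatch table:
print(DemoMenu.key_handler)  # maps the key codes above to their methods
# DemoMenu.handle_input() reads one key via get_character() and dispatches.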
32
1
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def A__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict=None ) -> str: """simple docstring""" assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' _UpperCAmelCase = nn.Parameter(SCREAMING_SNAKE_CASE_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' _UpperCAmelCase = nn.Parameter(SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" _UpperCAmelCase = np.asarray(weights[0] ) _UpperCAmelCase = np.asarray(weights[1] ) _UpperCAmelCase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , ) def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase = np.asarray(weights[0] ) _UpperCAmelCase = np.asarray(weights[1] ) _UpperCAmelCase = np.asarray(weights[2] ) _UpperCAmelCase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , ) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Dict: """simple docstring""" _UpperCAmelCase = weights[0][0][0] _UpperCAmelCase = np.asarray(layer_norm_a[0] ) _UpperCAmelCase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # lsh weights + output _UpperCAmelCase = weights[0][1] if len(SCREAMING_SNAKE_CASE_ ) < 4: set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ ) else: set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ ) # intermediate weights _UpperCAmelCase = weights[2][0][1][2] # Chunked Feed Forward if len(SCREAMING_SNAKE_CASE_ ) == 4: _UpperCAmelCase = intermediate_weights[2] # layernorm 2 _UpperCAmelCase = np.asarray(intermediate_weights[0][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # intermediate dense _UpperCAmelCase = np.asarray(intermediate_weights[1][0] ) _UpperCAmelCase =
np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # intermediate out _UpperCAmelCase = np.asarray(intermediate_weights[4][0] ) _UpperCAmelCase = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any ) -> str: """simple docstring""" _UpperCAmelCase = torch_model.reformer # word embeds _UpperCAmelCase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) if isinstance(weights[3] , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _UpperCAmelCase = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' _UpperCAmelCase = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ) ) _UpperCAmelCase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( SCREAMING_SNAKE_CASE_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _UpperCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # output layer norm _UpperCAmelCase = np.asarray(weights[7][0] ) _UpperCAmelCase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) # output embeddings _UpperCAmelCase = np.asarray(weights[9][0] ) _UpperCAmelCase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , ) def A__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _UpperCAmelCase = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f: _UpperCAmelCase = pickle.load(SCREAMING_SNAKE_CASE_ )['''weights'''] set_model_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) UpperCAmelCase_ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
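A sketch of driving the conversion entry point directly rather than via argparse; all three paths are placeholders, and the pickle is expected to hold the trax weight tree under a "weights" key, as the loader above assumes:

convert_trax_checkpoint_to_pytorch(
    "/path/to/trax_model.pkl",        # placeholder
    "/path/to/reformer_config.json",  # placeholder
    "/path/to/pytorch_model.bin",     # placeholder
)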
32
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = scope _UpperCAmelCase = range_bbox def UpperCamelCase( self ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _UpperCAmelCase = bbox[i, j, 3] _UpperCAmelCase = bbox[i, j, 1] _UpperCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: _UpperCAmelCase = bbox[i, j, 2] _UpperCAmelCase = bbox[i, j, 0] _UpperCAmelCase = t _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase( self ): return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) 
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = LiltModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model( _UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model( _UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase( self ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ): __A : Dict = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __A : Optional[Any] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __A : List[Any] = False __A : Optional[int] = False def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return True def UpperCamelCase( self ): _UpperCAmelCase = LiltModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 ) def UpperCamelCase( self ): self.config_tester.run_common_tests() def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() 
for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase ) @slow def UpperCamelCase( self ): for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @require_torch @slow class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): _UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase ) _UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase ) _UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase ) _UpperCAmelCase = torch.Size([1, 2, 768] ) _UpperCAmelCase = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , ) self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) )
32
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def A__ ( ) -> str: """simple docstring""" _UpperCAmelCase = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' _UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert('''RGB''' ) return image def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) 
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = val def A__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) _UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict _UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE_ , requires_grad=SCREAMING_SNAKE_CASE_ ), v_bias) ) _UpperCAmelCase = qkv_bias def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> int: """simple docstring""" _UpperCAmelCase = 3_64 if '''coco''' in model_name else 2_24 _UpperCAmelCase = InstructBlipVisionConfig(image_size=SCREAMING_SNAKE_CASE_ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: _UpperCAmelCase = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _UpperCAmelCase = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: _UpperCAmelCase = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_20_01 ).to_dict() elif "vicuna-13b" in model_name: _UpperCAmelCase = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_20_01 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 _UpperCAmelCase = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict() _UpperCAmelCase = InstructBlipConfig(vision_config=SCREAMING_SNAKE_CASE_ , text_config=SCREAMING_SNAKE_CASE_ , qformer_config=SCREAMING_SNAKE_CASE_ ) return config, image_size @torch.no_grad() def A__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Dict=False ) -> int: """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: _UpperCAmelCase = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) _UpperCAmelCase = LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) _UpperCAmelCase , _UpperCAmelCase = get_blipa_config(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = 
InstructBlipForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval() _UpperCAmelCase = { '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } _UpperCAmelCase , _UpperCAmelCase = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) _UpperCAmelCase = '''cuda:1''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = '''cuda:2''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = load_model_and_preprocess( name=SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , is_eval=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) original_model.eval() print('''Done!''' ) # update state dict keys _UpperCAmelCase = original_model.state_dict() _UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _UpperCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) if key.startswith('''Qformer.bert''' ): _UpperCAmelCase = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: _UpperCAmelCase = key.replace('''self''' , '''attention''' ) if "llm_proj" in key: _UpperCAmelCase = key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: _UpperCAmelCase = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): _UpperCAmelCase = key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): _UpperCAmelCase = key.replace('''t5''' , '''language''' ) _UpperCAmelCase = val # read in qv biases read_in_q_v_bias(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = load_demo_image() _UpperCAmelCase = '''What is unusual about this image?''' # create processor _UpperCAmelCase = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = InstructBlipProcessor( image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ , ) _UpperCAmelCase = processor(images=SCREAMING_SNAKE_CASE_ , text=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # make sure processor creates exact same pixel values _UpperCAmelCase = vis_processors['''eval'''](SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , SCREAMING_SNAKE_CASE_ ) original_model.to(SCREAMING_SNAKE_CASE_ ) hf_model.to(SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): if "vicuna" in model_name: _UpperCAmelCase = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits _UpperCAmelCase = hf_model(**SCREAMING_SNAKE_CASE_ ).logits else: _UpperCAmelCase = original_model( {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits _UpperCAmelCase = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE_ 
) _UpperCAmelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 ) _UpperCAmelCase = hf_model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape _UpperCAmelCase = 1E-4 if '''vicuna''' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) print('''Looks ok!''' ) print('''Generating with original model...''' ) _UpperCAmelCase = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) _UpperCAmelCase = hf_model.generate( **SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? _UpperCAmelCase = 2 print('''Original generation:''' , SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = processor.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [text.strip() for text in output_text] print('''HF generation:''' , SCREAMING_SNAKE_CASE_ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() UpperCAmelCase_ = [ "instructblip-vicuna-7b", "instructblip-vicuna-13b", "instructblip-flan-t5-xl", "instructblip-flan-t5-xxl", ] parser.add_argument( "--model_name", default="instructblip-flan-t5-xl", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) UpperCAmelCase_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
32
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class __UpperCamelCase ( A__ ):
    __A : Tuple = """rwkv"""
    __A : Any = {"""max_position_embeddings""": """context_length"""}

    def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = context_length
        _UpperCAmelCase = hidden_size
        _UpperCAmelCase = num_hidden_layers
        _UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
        _UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
        _UpperCAmelCase = layer_norm_epsilon
        _UpperCAmelCase = rescale_every
        _UpperCAmelCase = use_cache

        _UpperCAmelCase = bos_token_id
        _UpperCAmelCase = eos_token_id

        super().__init__(
            tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase
        )
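A minimal usage sketch for the config class above, assuming it is exposed publicly as RwkvConfig (the name is inferred from the registered "rwkv" model_type and is an assumption here):

# Illustrative only: RwkvConfig is the assumed public name of the class above.
from transformers import RwkvConfig

config = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=768, num_hidden_layers=12)
# attention_hidden_size falls back to hidden_size, intermediate_size to 4 * hidden_size
print(config.attention_hidden_size, config.intermediate_size)  # 768 3072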
32
1
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __UpperCamelCase ( A__ ):
    __A : UNetaDModel
    __A : ScoreSdeVeScheduler

    def __init__( self , _UpperCamelCase , _UpperCamelCase ):
        super().__init__()
        self.register_modules(unet=_UpperCamelCase , scheduler=_UpperCamelCase )

    @torch.no_grad()
    def __call__( self , _UpperCamelCase = 1 , _UpperCamelCase = 2000 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , **_UpperCamelCase , ):
        _UpperCAmelCase = self.unet.config.sample_size
        _UpperCAmelCase = (batch_size, 3, img_size, img_size)

        _UpperCAmelCase = self.unet

        _UpperCAmelCase = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase ) * self.scheduler.init_noise_sigma
        _UpperCAmelCase = sample.to(self.device )

        self.scheduler.set_timesteps(_UpperCamelCase )
        self.scheduler.set_sigmas(_UpperCamelCase )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            _UpperCAmelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                _UpperCAmelCase = self.unet(_UpperCamelCase , _UpperCamelCase ).sample
                _UpperCAmelCase = self.scheduler.step_correct(_UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample

            # prediction step
            _UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase ).sample
            _UpperCAmelCase = self.scheduler.step_pred(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )

            _UpperCAmelCase , _UpperCAmelCase = output.prev_sample, output.prev_sample_mean

        _UpperCAmelCase = sample_mean.clamp(0 , 1 )
        _UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _UpperCAmelCase = self.numpy_to_pil(_UpperCamelCase )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=_UpperCamelCase )
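A minimal sketch of driving the score-SDE pipeline above end to end, assuming it is exposed as ScoreSdeVePipeline in diffusers and that the "google/ncsnpp-celebahq-256" checkpoint is available (both names are assumptions):

# Illustrative sketch; the pipeline class and checkpoint names are assumed.
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
generator = torch.manual_seed(0)
# 2000 predictor steps, each followed by the scheduler's configured corrector steps
image = pipe(num_inference_steps=2000, generator=generator).images[0]
image.save("sde_ve_sample.png")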
32
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )

    _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]  # remove the leading "0b"
    _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]  # remove the leading "0b"

    _UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1''' ) )
        for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
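A readable restatement of the bitwise-AND-as-string routine above, with two spot checks (illustrative only; binary_and is a clarity alias for the function defined as A__):

# Readable restatement of the routine above, for illustration only.
def binary_and(a, b):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = bin(a)[2:]
    b_binary = bin(b)[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(c1 == "1" and c2 == "1"))
        for c1, c2 in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )

assert binary_and(25, 32) == "0b000000"  # 011001 & 100000 share no set bits
assert binary_and(37, 50) == "0b100000"  # 100101 & 110010 -> 100000 (i.e. 32)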
32
1
UpperCAmelCase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

UpperCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]

UpperCAmelCase_ = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
32
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class __UpperCamelCase ( A__ ):
    __A : Dict = """falcon"""
    __A : Any = ["""past_key_values"""]

    def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
        _UpperCAmelCase = vocab_size
        # Backward compatibility with n_embed kwarg
        _UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
        _UpperCAmelCase = hidden_size if n_embed is None else n_embed
        _UpperCAmelCase = num_hidden_layers
        _UpperCAmelCase = num_attention_heads
        _UpperCAmelCase = layer_norm_epsilon
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = use_cache
        _UpperCAmelCase = hidden_dropout
        _UpperCAmelCase = attention_dropout

        _UpperCAmelCase = bos_token_id
        _UpperCAmelCase = eos_token_id
        _UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
        _UpperCAmelCase = alibi
        _UpperCAmelCase = new_decoder_architecture
        _UpperCAmelCase = multi_query  # Ignored when new_decoder_architecture is True
        _UpperCAmelCase = parallel_attn
        _UpperCAmelCase = bias

        super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )

    @property
    def UpperCamelCase( self ):
        return self.hidden_size // self.num_attention_heads

    @property
    def UpperCamelCase( self ):
        return not self.alibi
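A quick sketch of the two derived properties at the end of the class, assuming the public name FalconConfig (an assumption based on the registered "falcon" model_type):

# Illustrative only: FalconConfig is the assumed public name of the class above.
from transformers import FalconConfig

config = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
print(config.hidden_size // config.num_attention_heads)  # per-head dimension: 64
# The second property simply reports "not alibi": rotary embeddings are used iff ALiBi is off.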
32
1
from __future__ import annotations


def A__ ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> list[int]:
    """simple docstring"""
    _UpperCAmelCase = 0
    _UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            _UpperCAmelCase = i + 1
        else:
            _UpperCAmelCase = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
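A short trace of the two-pointer scan above on the demo input, restated with readable names (illustrative only; note the scan assumes the input is sorted ascending):

# Readable restatement of the two-pointer search above, for illustration.
def two_pointer(nums, target):
    i, j = 0, len(nums) - 1
    while i < j:
        s = nums[i] + nums[j]
        if s == target:
            return [i, j]
        if s < target:
            i += 1  # sum too small: advance the left pointer
        else:
            j -= 1  # sum too large: retreat the right pointer
    return []

# [2, 7, 11, 15], target 9: 2 + 15 = 17 > 9 so j moves; 2 + 11 = 13 > 9 so j moves;
# 2 + 7 = 9, giving the answer [0, 1].
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]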
32
from math import sqrt def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int: """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = 1 while count != nth and number < 3: number += 1 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 while count != nth: number += 2 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 return number if __name__ == "__main__": print(f'''{solution() = }''')
32
1
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ ("vision-encoder-decoder", 
"FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A 
: Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
32
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """simple docstring"""
    if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        _UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(SCREAMING_SNAKE_CASE_ )

    if number < 0:
        return False

    _UpperCAmelCase = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10

    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
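The routine above tests for automorphic numbers: a number whose square ends in the number itself. A readable restatement with spot checks (illustrative only):

# Readable restatement of the automorphic-number check above.
def is_automorphic(number):
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic(76)      # 76 * 76 = 5776, which ends in 76
assert is_automorphic(625)     # 625 * 625 = 390625, which ends in 625
assert not is_automorphic(7)   # 7 * 7 = 49, which does not end in 7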
32
1
import numpy as np


def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
    """simple docstring"""
    return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
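The function above is the ELU activation: f(x) = x for x > 0 and alpha * (e^x - 1) otherwise. A quick numeric check via a readable restatement (illustrative only):

# Readable restatement of the ELU activation above.
import numpy as np

def elu(vector, alpha):
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))

x = np.array([2.3, 0.6, -2.0, -3.8])
# Positive inputs pass through; negative inputs are squashed toward -alpha.
print(elu(x, alpha=0.3))  # approx [ 2.3, 0.6, -0.2594, -0.2933 ]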
32
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
    __A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    __A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    __A : ClassVar[Features] = Features({} )
    __A : str = "text"

    @property
    def UpperCamelCase( self ):
        return {self.text_column: "text"}
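A minimal sketch of how such a task template is used, assuming the class above is the LanguageModeling template exported from datasets.tasks (the public name is an assumption):

# Illustrative only: LanguageModeling is the assumed public name of the template above.
from datasets.tasks import LanguageModeling

template = LanguageModeling(text_column="content")
print(template.task)            # "language-modeling"
# column_mapping (the property at the end of the class) renames dataset columns
# onto the template's canonical "text" column:
print(template.column_mapping)  # {"content": "text"}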
32
1
import argparse

from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging


logging.set_verbosity_info()


def A__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[Any]:
    """simple docstring"""
    _UpperCAmelCase = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
    print(F'''Building PyTorch model from configuration: {config}''' )
    _UpperCAmelCase = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(SCREAMING_SNAKE_CASE_ )


if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    UpperCAmelCase_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
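A sketch of how the conversion script above is invoked; the script file name and all paths are placeholders:

# Hypothetical shell invocation, shown as comments; name and paths are placeholders.
# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/t5/model.ckpt \
#     --config_file /path/to/t5/config.json \
#     --pytorch_dump_path /path/to/output_dir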
32
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"vocab_file": "spiece.model"} UpperCAmelCase_ = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 UpperCAmelCase_ = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } UpperCAmelCase_ = "▁" class __UpperCamelCase ( A__ ): __A : Any = VOCAB_FILES_NAMES __A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Tuple = ["""input_ids""", """attention_mask"""] def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: _UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) if legacy: logger.warning_once( f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' ) _UpperCAmelCase = legacy _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , ) _UpperCAmelCase = vocab_file _UpperCAmelCase = extra_ids _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , ) return max_model_length @property def UpperCamelCase( self ): return self.sp_model.get_piece_size() + self._extra_ids def UpperCamelCase( self ): _UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_UpperCamelCase )) + [1] return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def UpperCamelCase( self ): return list( set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase( self ): return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()] def UpperCamelCase( self , _UpperCamelCase ): if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) if token_ids_a is None: return token_ids_a else: _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None return state def __setstate__( self , _UpperCamelCase ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: _UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' ) return super().tokenize(_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): if not self.legacy: _UpperCAmelCase = text.startswith(_UpperCamelCase ) if is_first: _UpperCAmelCase = text[1:] _UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ): _UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def UpperCamelCase( self , _UpperCamelCase ): if token.startswith('''<extra_id_''' ): _UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase ) _UpperCAmelCase = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): if index < self.sp_model.get_piece_size(): _UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase ) else: _UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>''' return token def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = [] _UpperCAmelCase = '''''' _UpperCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token _UpperCAmelCase = True _UpperCAmelCase = [] else: current_sub_tokens.append(_UpperCamelCase ) _UpperCAmelCase = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: _UpperCAmelCase = 
self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
32
1
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    UpperCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        UpperCAmelCase_ = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        UpperCAmelCase_ = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    UpperCAmelCase_ = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
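Two ways the launcher above is meant to be called, per its own argument parser; the script name, host, and key path are placeholders:

# Hypothetical invocations; script name, host, and key path are placeholders.
# On-demand cluster (defaults: a V100:1 instance on the cheapest provider):
#   python run_on_remote.py --example pytorch/text-generation/run_generation.py \
#       --model_type=gpt2 --model_name_or_path=gpt2
# Bring-your-own cluster over SSH (BYO args must not be mixed with on-demand args):
#   python run_on_remote.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa \
#       --example pytorch/text-generation/run_generation.py --model_type=gpt2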
32
from __future__ import annotations


def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """simple docstring"""
    _UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
    return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )


def A__ ( ) -> int | None:
    """simple docstring"""
    for base_num in range(99_99 , 49_99 , -1 ):
        _UpperCAmelCase = 10_00_02 * base_num
        if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
            return candidate

    for base_num in range(3_33 , 99 , -1 ):
        _UpperCAmelCase = 1_00_20_03 * base_num
        if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
            return candidate

    return None


if __name__ == "__main__":
    print(f'''{solution() = }''')
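This search is Project Euler problem 38 (largest 9-pandigital concatenated product). The magic multipliers encode digit concatenation: for a 4-digit n whose double has 5 digits, concat(n, 2n) = n * 10^5 + 2n = n * 100002; for a 3-digit n with 3-digit double and triple, concat(n, 2n, 3n) = n * 10^6 + 2n * 10^3 + 3n = n * 1002003. A quick check:

# Why the multipliers in the loops above implement concatenation:
n = 9327
assert n * 100_002 == int(str(n) + str(2 * n))                 # 932718654
m = 192
assert m * 1_002_003 == int(str(m) + str(2 * m) + str(3 * m))  # 192384576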
32
1
class __UpperCamelCase :
    def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
        _UpperCAmelCase = None
        _UpperCAmelCase = None
        _UpperCAmelCase = graph
        self._normalize_graph(_UpperCamelCase , _UpperCamelCase )
        _UpperCAmelCase = len(_UpperCamelCase )
        _UpperCAmelCase = None

    def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
        if sources is int:
            _UpperCAmelCase = [sources]
        if sinks is int:
            _UpperCAmelCase = [sinks]

        if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) == 0:
            return

        _UpperCAmelCase = sources[0]
        _UpperCAmelCase = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(_UpperCamelCase ) > 1 or len(_UpperCamelCase ) > 1:
            _UpperCAmelCase = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )

            _UpperCAmelCase = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                _UpperCAmelCase = max_input_flow
            _UpperCAmelCase = 0

            _UpperCAmelCase = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                _UpperCAmelCase = max_input_flow
            _UpperCAmelCase = size - 1

    def UpperCamelCase( self ):
        if self.maximum_flow_algorithm is None:
            raise Exception('''You need to set maximum flow algorithm before.''' )
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def UpperCamelCase( self , _UpperCamelCase ):
        _UpperCAmelCase = algorithm(self )


class __UpperCamelCase :
    def __init__( self , _UpperCamelCase ):
        _UpperCAmelCase = flow_network
        _UpperCAmelCase = flow_network.verticesCount
        _UpperCAmelCase = flow_network.sourceIndex
        _UpperCAmelCase = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        _UpperCAmelCase = flow_network.graph
        _UpperCAmelCase = False

    def UpperCamelCase( self ):
        if not self.executed:
            self._algorithm()
            _UpperCAmelCase = True

    def UpperCamelCase( self ):
        pass


class __UpperCamelCase ( A__ ):
    def __init__( self , _UpperCamelCase ):
        super().__init__(_UpperCamelCase )
        # use this to save your result
        _UpperCAmelCase = -1

    def UpperCamelCase( self ):
        if not self.executed:
            raise Exception('''You should execute algorithm before using its result!''' )

        return self.maximum_flow


class __UpperCamelCase ( A__ ):
    def __init__( self , _UpperCamelCase ):
        super().__init__(_UpperCamelCase )

        _UpperCAmelCase = [[0] * self.verticies_count for i in range(self.verticies_count )]

        _UpperCAmelCase = [0] * self.verticies_count
        _UpperCAmelCase = [0] * self.verticies_count

    def UpperCamelCase( self ):
        _UpperCAmelCase = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        _UpperCAmelCase = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        _UpperCAmelCase = 0
        while i < len(_UpperCamelCase ):
            _UpperCAmelCase = vertices_list[i]
            _UpperCAmelCase = self.heights[vertex_index]
            self.process_vertex(_UpperCamelCase )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(_UpperCamelCase ) )
                _UpperCAmelCase = 0
            else:
                i += 1

        _UpperCAmelCase = sum(self.preflow[self.source_index] )

    def UpperCamelCase( self , _UpperCamelCase ):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(_UpperCamelCase , _UpperCamelCase )

            self.relabel(_UpperCamelCase )

    def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
        _UpperCAmelCase = min(
            self.excesses[from_index] ,
            self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def UpperCamelCase( self , _UpperCamelCase ):
        _UpperCAmelCase = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                _UpperCAmelCase = self.heights[to_index]

        if min_height is not None:
            _UpperCAmelCase = min_height + 1


if __name__ == "__main__":
    UpperCAmelCase_ = [0]
    UpperCAmelCase_ = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    UpperCAmelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    UpperCAmelCase_ = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    UpperCAmelCase_ = flow_network.find_maximum_flow()

    print(f'''maximum flow is {maximum_flow}''')
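The demo's expected result can be cross-checked by hand: the only augmenting path from vertex 0 to vertex 3 is 0 -> 1 -> 2 -> 3 (the 3 -> 0 edge points back into the source), with bottleneck min(7, 6, 8) = 6. A minimal Edmonds-Karp sketch, independent of the classes above, confirming that value:

# Independent cross-check: tiny BFS-based Ford-Fulkerson on the same demo graph.
from collections import deque

def bfs_max_flow(capacity, source, sink):
    n = len(capacity)
    residual = [row[:] for row in capacity]
    flow_total = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if residual[u][v] > 0 and parent[v] == -1:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return flow_total
        # find the bottleneck along the path, then apply it
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        flow_total += bottleneck

assert bfs_max_flow([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6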
32
import numpy as np


def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
    """simple docstring"""
    return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
1
import copy
import os

import cva
import numpy as np
from matplotlib import pyplot as plt


class __UpperCamelCase :
    def __init__( self ):
        _UpperCAmelCase = ''''''
        _UpperCAmelCase = ''''''
        _UpperCAmelCase = []
        _UpperCAmelCase = 0
        _UpperCAmelCase = 256
        _UpperCAmelCase = 0
        _UpperCAmelCase = 0
        _UpperCAmelCase = 0
        _UpperCAmelCase = 0

    def UpperCamelCase( self , _UpperCamelCase ):
        _UpperCAmelCase = cva.imread(_UpperCamelCase , 0 )
        _UpperCAmelCase = copy.deepcopy(self.img )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
        _UpperCAmelCase = np.sum(_UpperCamelCase )
        for i in range(len(_UpperCamelCase ) ):
            _UpperCAmelCase = x[i] / self.k
            self.sk += prk
            _UpperCAmelCase = (self.L - 1) * self.sk
            if self.rem != 0:
                # self.rem starts at 0 and is only assigned inside this branch,
                # so the branch never executes; it is retained as-is from the source
                _UpperCAmelCase = int(last % last )
            _UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(_UpperCamelCase )
            _UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
            _UpperCAmelCase = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                _UpperCAmelCase = self.img[j][i]
                if num != self.last_list[num]:
                    _UpperCAmelCase = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''' , self.img )

    def UpperCamelCase( self ):
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def UpperCamelCase( self ):
        cva.imshow('''Output-Image''' , self.img )
        cva.imshow('''Input-Image''' , self.original_image )
        cva.waitKey(5000 )
        cva.destroyAllWindows()


if __name__ == "__main__":
    # dirname (not basename) so the sample image is resolved relative to this script's folder
    UpperCAmelCase_ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    UpperCAmelCase_ = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
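The mapping built in the loop above is standard histogram equalization: each grey level r_k is sent to s_k = round((L - 1) * CDF(r_k)). A tiny vectorized sketch of the same transform, independent of the class above (illustrative only):

# Minimal histogram-equalization mapping, equivalent in spirit to the loop above.
import numpy as np

levels = 256
img = np.random.randint(0, levels, size=(4, 4), dtype=np.uint8)  # toy "image"
hist = np.bincount(img.ravel(), minlength=levels)
cdf = np.cumsum(hist) / hist.sum()
mapping = np.round((levels - 1) * cdf).astype(np.uint8)  # s_k = (L - 1) * CDF(r_k)
equalized = mapping[img]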
32
UpperCAmelCase_ = { "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/" } # Exclamation mark is not in ITU-R recommendation # fmt: on UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()} def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return "".join(REVERSE_DICT[char] for char in message.split() ) def A__ ( ) -> None: """simple docstring""" _UpperCAmelCase = '''Morse code here!''' print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
32
1
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


UpperCAmelCase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def A__ (
    SCREAMING_SNAKE_CASE_ : Union[str, Any] ,
    SCREAMING_SNAKE_CASE_ : tuple ,
    SCREAMING_SNAKE_CASE_ : Path ,
    SCREAMING_SNAKE_CASE_ : Optional[int] ,
    SCREAMING_SNAKE_CASE_ : Tuple ,
    SCREAMING_SNAKE_CASE_ : Tuple ,
    SCREAMING_SNAKE_CASE_ : int ,
    SCREAMING_SNAKE_CASE_ : int=False ,
) -> Union[str, Any]:
    """simple docstring"""
    output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
            f=output_path.as_posix() ,
            input_names=SCREAMING_SNAKE_CASE_ ,
            output_names=SCREAMING_SNAKE_CASE_ ,
            dynamic_axes=SCREAMING_SNAKE_CASE_ ,
            do_constant_folding=SCREAMING_SNAKE_CASE_ ,
            use_external_data_format=SCREAMING_SNAKE_CASE_ ,
            enable_onnx_checker=SCREAMING_SNAKE_CASE_ ,
            opset_version=SCREAMING_SNAKE_CASE_ ,
        )
    else:
        export(
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
            f=output_path.as_posix() ,
            input_names=SCREAMING_SNAKE_CASE_ ,
            output_names=SCREAMING_SNAKE_CASE_ ,
            dynamic_axes=SCREAMING_SNAKE_CASE_ ,
            do_constant_folding=SCREAMING_SNAKE_CASE_ ,
            opset_version=SCREAMING_SNAKE_CASE_ ,
        )


@torch.no_grad()
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False ) -> Any:
    """simple docstring"""
    _UpperCAmelCase = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        _UpperCAmelCase = '''cuda'''
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        _UpperCAmelCase = '''cpu'''
    _UpperCAmelCase = Path(SCREAMING_SNAKE_CASE_ )

    # VAE DECODER
    _UpperCAmelCase = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
    _UpperCAmelCase = vae_decoder.config.latent_channels
    # forward only through the decoder part
    _UpperCAmelCase = vae_decoder.decode
    onnx_export(
        SCREAMING_SNAKE_CASE_ ,
        model_args=(
            torch.randn(1 , SCREAMING_SNAKE_CASE_ , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ),
            False,
        ) ,
        output_path=output_path / '''vae_decoder''' / '''model.onnx''' ,
        ordered_input_names=['''latent_sample''', '''return_dict'''] ,
        output_names=['''sample'''] ,
        dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } ,
        opset=SCREAMING_SNAKE_CASE_ ,
    )
    del vae_decoder


if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    UpperCAmelCase_ = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
    print("SD: Done: ONNX")
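A sketch of running the VAE-decoder export above; the script name and paths are placeholders:

# Hypothetical invocation, shown as comments; script name and paths are placeholders.
# python convert_vae_decoder_to_onnx.py \
#     --model_path runwayml/stable-diffusion-v1-5 \
#     --output_path ./sd_onnx \
#     --opset 14
# The exported decoder lands at ./sd_onnx/vae_decoder/model.onnx with a dynamic
# (batch, channels, height, width) latent input named "latent_sample".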
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Any = DanceDiffusionPipeline __A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __A : Tuple = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } __A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __A : List[str] = False __A : str = False def UpperCamelCase( self ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) _UpperCAmelCase = IPNDMScheduler() _UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ): if str(_UpperCamelCase ).startswith('''mps''' ): _UpperCAmelCase = torch.manual_seed(_UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) _UpperCAmelCase = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def UpperCamelCase( self ): _UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase ) _UpperCAmelCase = pipe(**_UpperCamelCase ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) _UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCamelCase( self ): return super().test_save_load_local() @skip_mps def UpperCamelCase( self ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def UpperCamelCase( self ): return super().test_save_load_optional_components() @skip_mps def UpperCamelCase( self ): return super().test_attention_slicing_forward_pass() def UpperCamelCase( self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) 
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
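# A minimal, hedged usage sketch for the pipeline exercised by the tests
# above (assumes a CUDA device and access to the harmonai/maestro-150k
# checkpoint; the call arguments mirror the slow tests).
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
generator = torch.manual_seed(0)
audios = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096).audios
print(audios.shape)  # (batch, channels, sample_size)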
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever UpperCAmelCase_ = logging.getLogger(__name__) class __UpperCamelCase ( A__ ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): super().__init__( _UpperCamelCase , question_encoder_tokenizer=_UpperCamelCase , generator_tokenizer=_UpperCamelCase , index=_UpperCamelCase , init_retrieval=_UpperCamelCase , ) _UpperCAmelCase = None def UpperCamelCase( self , _UpperCamelCase ): logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually _UpperCAmelCase = self._infer_socket_ifname() # avoid clash with the NCCL port _UpperCAmelCase = str(distributed_port + 1 ) _UpperCAmelCase = dist.new_group(ranks=_UpperCamelCase , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def UpperCamelCase( self ): return dist.get_rank(group=self.process_group ) == 0 def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=torch.floataa ): _UpperCAmelCase = torch.empty(_UpperCamelCase , dtype=_UpperCamelCase ) dist.scatter(_UpperCamelCase , src=0 , scatter_list=_UpperCamelCase , group=self.process_group ) return target_tensor def UpperCamelCase( self ): _UpperCAmelCase = psutil.net_if_addrs() # a hacky way to deal with varying network interface names _UpperCAmelCase = next((addr for addr in addrs if addr.startswith('''e''' )) , _UpperCamelCase ) return ifname def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): # single GPU training if not dist.is_initialized(): _UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(_UpperCamelCase , _UpperCamelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCamelCase ) # distributed training _UpperCAmelCase = dist.get_world_size(group=self.process_group ) # gather logic _UpperCAmelCase = None if self._is_main(): _UpperCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_UpperCamelCase )] dist.gather(torch.tensor(_UpperCamelCase ) , dst=0 , gather_list=_UpperCamelCase , group=self.process_group ) # scatter logic _UpperCAmelCase = question_hidden_states.shape[0] _UpperCAmelCase = [] _UpperCAmelCase = [] if self._is_main(): assert len(_UpperCamelCase ) == world_size _UpperCAmelCase , _UpperCAmelCase = self._main_retrieve(torch.cat(_UpperCamelCase ).numpy() , _UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase = torch.tensor(_UpperCamelCase ), torch.tensor(_UpperCamelCase ) _UpperCAmelCase = self._chunk_tensor(_UpperCamelCase , _UpperCamelCase ) _UpperCAmelCase = self._chunk_tensor(_UpperCamelCase , _UpperCamelCase ) _UpperCAmelCase = self._scattered(_UpperCamelCase , [n_queries, n_docs] , target_type=torch.intaa ) _UpperCAmelCase = self._scattered(_UpperCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_UpperCamelCase )
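# A hedged sketch of constructing the base retriever that the distributed
# wrapper above extends (index_name="exact" with use_dummy_dataset=True keeps
# the download small; facebook/rag-token-nq is the reference checkpoint).
from transformers import RagRetriever

retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
)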
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ ("vision-encoder-decoder", 
"FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A 
: Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
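# A hedged usage sketch for the auto classes registered above (assumes flax
# and jax are installed; bert-base-cased is an illustrative checkpoint that
# ships Flax weights on the Hub).
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxAutoModel.from_pretrained("bert-base-cased")
outputs = model(**tokenizer("Hello world", return_tensors="np"))
print(outputs.last_hidden_state.shape)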
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import base64


def base32_encode(string: str) -> bytes:
    """Encode a UTF-8 string with the standard library's b32encode.

    base32 is an assumption: the garbled source ("baseaa.baaencode") no longer
    says which digits were stripped, and b16/b64/b85 would work the same way.
    """
    return base64.b32encode(string.encode("utf-8"))


def base32_decode(encoded_bytes: bytes) -> str:
    """Decode base32-encoded bytes back to a UTF-8 string."""
    return base64.b32decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
    print(decoded)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
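# A hedged usage sketch for the lazily exported classes (BAAI/AltCLIP is the
# reference checkpoint; text-feature extraction keeps the example image-free).
from transformers import AltCLIPModel, AltCLIPProcessor

model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
inputs = processor(text=["a photo of a cat"], return_tensors="pt", padding=True)
text_features = model.get_text_features(**inputs)
print(text_features.shape)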
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): __A : int = ["""pixel_values"""] def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): super().__init__(**_UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' ) _UpperCAmelCase = do_resize _UpperCAmelCase = do_rescale _UpperCAmelCase = do_normalize _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if "shortest_edge" in size: _UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ): return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if not is_batched(_UpperCamelCase ): _UpperCAmelCase = [images] if not valid_images(_UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
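# The preprocess flow above reduces to resize -> center-crop -> rescale ->
# normalize. A minimal standalone sketch of the same steps with numpy/PIL
# (illustrative only, center crop omitted for brevity; the mean/std are the
# ImageNet defaults the class falls back to):
import numpy as np
from PIL import Image

mean = np.array([0.485, 0.456, 0.406])  # IMAGENET_DEFAULT_MEAN
std = np.array([0.229, 0.224, 0.225])   # IMAGENET_DEFAULT_STD

image = Image.new("RGB", (256, 256)).resize((224, 224), Image.BICUBIC)
pixels = np.asarray(image).astype(np.float32) / 255.0  # rescale
pixels = (pixels - mean) / std                         # normalize
pixels = pixels.transpose(2, 0, 1)                     # channels-first
print(pixels.shape)  # (3, 224, 224)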
UpperCAmelCase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> bytes: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = ''''''.join(bin(SCREAMING_SNAKE_CASE_ )[2:].zfill(8 ) for byte in data ) _UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) % 6 != 0 if padding_needed: # The padding that will be added later _UpperCAmelCase = B'''=''' * ((6 - len(SCREAMING_SNAKE_CASE_ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE_ ) % 6) else: _UpperCAmelCase = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 6 ) ).encode() + padding ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = ( '''argument should be a bytes-like object or ASCII string, ''' F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(SCREAMING_SNAKE_CASE_ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): try: _UpperCAmelCase = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) _UpperCAmelCase = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(SCREAMING_SNAKE_CASE_ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one _UpperCAmelCase = encoded_data[:-padding] _UpperCAmelCase = ''''''.join( bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: _UpperCAmelCase = ''''''.join( bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data ) _UpperCAmelCase = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 8 ) ] return bytes(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod()
from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=A__ ): __A : str = ["""torch""", """scipy"""] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(cls , ['''torch''', '''scipy'''] )
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. _UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] _UpperCAmelCase = DisjunctiveConstraint(_UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids , _UpperCamelCase ) ) with self.assertRaises(_UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCamelCase( self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). _UpperCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_UpperCamelCase ): DisjunctiveConstraint(_UpperCamelCase ) # fails here def UpperCamelCase( self ): _UpperCAmelCase = [[1, 2, 3], [1, 2, 4]] _UpperCAmelCase = DisjunctiveConstraint(_UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(1 ) _UpperCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(_UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(2 ) _UpperCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(_UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(3 ) _UpperCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(_UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCamelCase( self ): _UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] _UpperCAmelCase = DisjunctiveConstraint(_UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
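# A hedged sketch of DisjunctiveConstraint in constrained beam search
# (t5-small and the constraint words are illustrative; per the test above,
# the two phrases must not be token-level subsets of each other).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

constraint = DisjunctiveConstraint(
    [
        tokenizer("Regen", add_special_tokens=False).input_ids,
        tokenizer("nass", add_special_tokens=False).input_ids,
    ]
)
inputs = tokenizer("translate English to German: It is raining outside.", return_tensors="pt")
out = model.generate(**inputs, constraints=[constraint], num_beams=4)
print(tokenizer.decode(out[0], skip_special_tokens=True))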
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    # Sieve of Eratosthenes: 0 marks "still potentially prime", 1 marks composite.
    primality_list = [0 for _ in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
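# Small-n sanity check for solution(): the primes below ten are 2, 3, 5
# and 7, so their sum must be 17.
assert solution(10) == 17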
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class __UpperCamelCase : def __init__( self , _UpperCamelCase = None ): if components is None: _UpperCAmelCase = [] _UpperCAmelCase = list(_UpperCamelCase ) def __len__( self ): return len(self.__components ) def __str__( self ): return "(" + ",".join(map(_UpperCamelCase , self.__components ) ) + ")" def __add__( self , _UpperCamelCase ): _UpperCAmelCase = len(self ) if size == len(_UpperCamelCase ): _UpperCAmelCase = [self.__components[i] + other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )] return Vector(_UpperCamelCase ) else: raise Exception('''must have the same size''' ) def __sub__( self , _UpperCamelCase ): _UpperCAmelCase = len(self ) if size == len(_UpperCamelCase ): _UpperCAmelCase = [self.__components[i] - other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )] return Vector(_UpperCamelCase ) else: # error case raise Exception('''must have the same size''' ) @overload def __mul__( self , _UpperCamelCase ): ... @overload def __mul__( self , _UpperCamelCase ): ... def __mul__( self , _UpperCamelCase ): if isinstance(_UpperCamelCase , (float, int) ): _UpperCAmelCase = [c * other for c in self.__components] return Vector(_UpperCamelCase ) elif isinstance(_UpperCamelCase , _UpperCamelCase ) and len(self ) == len(_UpperCamelCase ): _UpperCAmelCase = len(self ) _UpperCAmelCase = [self.__components[i] * other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )] return sum(_UpperCamelCase ) else: # error case raise Exception('''invalid operand!''' ) def UpperCamelCase( self ): return Vector(self.__components ) def UpperCamelCase( self , _UpperCamelCase ): if isinstance(_UpperCamelCase , _UpperCamelCase ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('''index out of range''' ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): assert -len(self.__components ) <= pos < len(self.__components ) _UpperCAmelCase = value def UpperCamelCase( self ): if len(self.__components ) == 0: raise Exception('''Vector is empty''' ) _UpperCAmelCase = [c**2 for c in self.__components] return math.sqrt(sum(_UpperCamelCase ) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = False ): _UpperCAmelCase = self * other _UpperCAmelCase = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> Vector: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return Vector([0] * dimension ) def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Vector: """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )) _UpperCAmelCase = [0] * dimension _UpperCAmelCase = 1 return Vector(SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Vector , SCREAMING_SNAKE_CASE_ : Vector ) -> Vector: """simple docstring""" assert ( isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (isinstance(SCREAMING_SNAKE_CASE_ , (int, float) )) ) return x * scalar + y def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Vector: """simple docstring""" 
random.seed(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )] return Vector(SCREAMING_SNAKE_CASE_ ) class __UpperCamelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = matrix _UpperCAmelCase = w _UpperCAmelCase = h def __str__( self ): _UpperCAmelCase = '''''' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , _UpperCamelCase ): if self.__width == other.width() and self.__height == other.height(): _UpperCAmelCase = [] for i in range(self.__height ): _UpperCAmelCase = [ self.__matrix[i][j] + other.component(_UpperCamelCase , _UpperCamelCase ) for j in range(self.__width ) ] matrix.append(_UpperCamelCase ) return Matrix(_UpperCamelCase , self.__width , self.__height ) else: raise Exception('''matrix must have the same dimension!''' ) def __sub__( self , _UpperCamelCase ): if self.__width == other.width() and self.__height == other.height(): _UpperCAmelCase = [] for i in range(self.__height ): _UpperCAmelCase = [ self.__matrix[i][j] - other.component(_UpperCamelCase , _UpperCamelCase ) for j in range(self.__width ) ] matrix.append(_UpperCamelCase ) return Matrix(_UpperCamelCase , self.__width , self.__height ) else: raise Exception('''matrices must have the same dimension!''' ) @overload def __mul__( self , _UpperCamelCase ): ... @overload def __mul__( self , _UpperCamelCase ): ... def __mul__( self , _UpperCamelCase ): if isinstance(_UpperCamelCase , _UpperCamelCase ): # matrix-vector if len(_UpperCamelCase ) == self.__width: _UpperCAmelCase = zero_vector(self.__height ) for i in range(self.__height ): _UpperCAmelCase = [ self.__matrix[i][j] * other.component(_UpperCamelCase ) for j in range(self.__width ) ] ans.change_component(_UpperCamelCase , sum(_UpperCamelCase ) ) return ans else: raise Exception( '''vector must have the same size as the ''' '''number of columns of the matrix!''' ) elif isinstance(_UpperCamelCase , (int, float) ): # matrix-scalar _UpperCAmelCase = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(_UpperCamelCase , self.__width , self.__height ) return None def UpperCamelCase( self ): return self.__height def UpperCamelCase( self ): return self.__width def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('''change_component: indices out of bounds''' ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if 0 <= x < self.__height and 0 <= y < self.__width: _UpperCAmelCase = value else: raise Exception('''change_component: indices out of bounds''' ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): if self.__height != self.__width: raise Exception('''Matrix is not square''' ) _UpperCAmelCase = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(_UpperCamelCase ) ): _UpperCAmelCase = minor[i][:y] + minor[i][y + 1 :] return Matrix(_UpperCamelCase , self.__width - 1 , self.__height - 1 ).determinant() def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(_UpperCamelCase , 
_UpperCamelCase ) else: raise Exception('''Indices out of bounds''' ) def UpperCamelCase( self ): if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if self.__height < 1: raise Exception('''Matrix has no element''' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: _UpperCAmelCase = [ self.__matrix[0][y] * self.cofactor(0 , _UpperCamelCase ) for y in range(self.__width ) ] return sum(_UpperCamelCase ) def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> Matrix: """simple docstring""" _UpperCAmelCase = [[0] * n for _ in range(SCREAMING_SNAKE_CASE_ )] return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Matrix: """simple docstring""" random.seed(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [ [random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ ) ] return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
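# A hedged usage sketch of the intended API for the linear-algebra classes
# above. The class names Vector and Matrix are grounded in the factory
# functions and return annotations; the method names (euclidean_length,
# determinant, ...) are reconstructed from internal references, since the
# obfuscated definitions no longer carry them.
v = Vector([1.0, 2.0, 2.0])
w = Vector([1.0, 0.0, 0.0])
print(len(v))                      # 3
print(v * w)                       # dot product -> 1.0
print((v + w).euclidean_length())  # sqrt(2^2 + 2^2 + 2^2)

m = Matrix([[1.0, 0.0], [0.0, 2.0]], 2, 2)
print(m.determinant())             # 2.0
print(m * Vector([3.0, 4.0]))      # (3.0,8.0)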
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( A__ ): __A : Dict = ["""image_processor""", """tokenizer"""] __A : List[str] = """BridgeTowerImageProcessor""" __A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , _UpperCamelCase , _UpperCamelCase ): super().__init__(_UpperCamelCase , _UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = self.tokenizer( text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , ) # add pixel_values + pixel_mask _UpperCAmelCase = self.image_processor( _UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase ) encoding.update(_UpperCamelCase ) return encoding def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase ) @property def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer.model_input_names _UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = BlipImageProcessor() _UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' ) _UpperCAmelCase = BlipProcessor(_UpperCamelCase , _UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase( self , **_UpperCamelCase ): return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ).tokenizer def UpperCamelCase( self , **_UpperCamelCase ): return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ).image_processor def UpperCamelCase( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase( self ): _UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase( self ): _UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 ) _UpperCAmelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''np''' ) _UpperCAmelCase = processor(images=_UpperCamelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = processor(text=_UpperCamelCase ) _UpperCAmelCase = tokenizer(_UpperCamelCase , return_token_type_ids=_UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase ) 
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase ): processor() def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(_UpperCamelCase ) _UpperCAmelCase = tokenizer.batch_decode(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=_UpperCamelCase , images=_UpperCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right UpperCAmelCase_ = 25_00_04 UpperCAmelCase_ = 25_00_20 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Union[str, Any] = MBartaaTokenizer __A : Any = MBartaaTokenizerFast __A : Dict = True __A : Any = True def UpperCamelCase( self ): super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = MBartaaTokenizer(_UpperCamelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase( self ): _UpperCAmelCase = '''<s>''' _UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_UpperCamelCase ) , 1054 ) def UpperCamelCase( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def UpperCamelCase( self ): _UpperCAmelCase = MBartaaTokenizer(_UpperCamelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_UpperCamelCase ) _UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _UpperCamelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def UpperCamelCase( self ): # fmt: off _UpperCAmelCase = {'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 
1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def UpperCamelCase( self ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = tokenizer_r.save_pretrained(_UpperCamelCase ) _UpperCAmelCase = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase ) # Checks 
everything loads correctly in the same way _UpperCAmelCase = tokenizer_r.from_pretrained(_UpperCamelCase ) _UpperCAmelCase = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=True _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase ) _UpperCAmelCase = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase ) # Checks everything loads correctly in the same way _UpperCAmelCase = tokenizer_r.from_pretrained(_UpperCamelCase ) _UpperCAmelCase = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=False _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase ) _UpperCAmelCase = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _UpperCAmelCase = tokenizer_r.from_pretrained(_UpperCamelCase ) _UpperCAmelCase = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): __A : Dict = """facebook/mbart-large-50-one-to-many-mmt""" __A : Optional[int] = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] __A : Optional[int] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] __A : Any = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2] @classmethod def UpperCamelCase( cls ): _UpperCAmelCase = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _UpperCAmelCase = 1 return cls def UpperCamelCase( self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 ) 
def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _UpperCamelCase ) def UpperCamelCase( self ): self.assertIn(_UpperCamelCase , self.tokenizer.all_special_ids ) _UpperCAmelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] _UpperCAmelCase = self.tokenizer.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) _UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , _UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.tokenizer(_UpperCamelCase , max_length=_UpperCamelCase , truncation=_UpperCamelCase ).input_ids[0] self.assertEqual(ids[0] , _UpperCamelCase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) def UpperCamelCase( self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] ) def UpperCamelCase( self ): _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_UpperCamelCase ) _UpperCAmelCase = MBartaaTokenizer.from_pretrained(_UpperCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCamelCase ) @require_torch def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , return_tensors='''pt''' ) _UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _UpperCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _UpperCamelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer(self.src_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=3 , return_tensors='''pt''' ) _UpperCAmelCase = self.tokenizer( text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=10 , return_tensors='''pt''' ) _UpperCAmelCase = targets['''input_ids'''] _UpperCAmelCase = shift_tokens_right(_UpperCamelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCamelCase( self ): 
_UpperCAmelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(_UpperCamelCase ) , { # en_XX, A, test, EOS '''input_ids''': [[250004, 62, 3034, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 250001, } , )
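# Illustrative note (comment only, not part of the original test file): mBART-50
# labels have the layout [lang_code, tokens..., eos]. shift_tokens_right rotates
# the final non-pad token (eos == 2) to position 0, so decoder inputs become
# [2, lang_code, tokens...] -- which is exactly the [2, RO_CODE] prefix the
# batch test above asserts on decoder_input_ids.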
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __UpperCamelCase ( A__ ): __A : Any = """biogpt""" def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = scale_embedding _UpperCAmelCase = use_cache _UpperCAmelCase = layerdrop _UpperCAmelCase = activation_dropout super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
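# Illustrative worked example (standalone sketch, not from the original file):
# with the defaults above, scale_embedding=True makes BioGPT multiply token
# embeddings by sqrt(hidden_size), i.e. sqrt(1024) = 32.
import math

assert math.sqrt(1024) == 32.0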
32
1
import sys from collections import defaultdict class __UpperCamelCase : def __init__( self ): _UpperCAmelCase = [] def UpperCamelCase( self , _UpperCamelCase ): return self.node_position[vertex] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = pos def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCAmelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCAmelCase = 2 * start + 1 else: _UpperCAmelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child] _UpperCAmelCase , _UpperCAmelCase = ( heap[start], positions[start], ) _UpperCAmelCase , _UpperCAmelCase = temp, tempa _UpperCAmelCase = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , _UpperCamelCase ) self.top_to_bottom(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = position[index] while index != 0: _UpperCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: _UpperCAmelCase = heap[parent] _UpperCAmelCase = position[parent] self.set_position(position[parent] , _UpperCamelCase ) else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(_UpperCamelCase , _UpperCamelCase ) break _UpperCAmelCase = parent else: _UpperCAmelCase = val _UpperCAmelCase = temp self.set_position(_UpperCamelCase , 0 ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = len(_UpperCamelCase ) // 2 - 1 for i in range(_UpperCamelCase , -1 , -1 ): self.top_to_bottom(_UpperCamelCase , _UpperCamelCase , len(_UpperCamelCase ) , _UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = positions[0] _UpperCAmelCase = sys.maxsize self.top_to_bottom(_UpperCamelCase , 0 , len(_UpperCamelCase ) , _UpperCamelCase ) return temp def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase = Heap() _UpperCAmelCase = [0] * len(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [-1] * len(SCREAMING_SNAKE_CASE_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCAmelCase = [] for vertex in range(len(SCREAMING_SNAKE_CASE_ ) ): distance_tv.append(sys.maxsize ) positions.append(SCREAMING_SNAKE_CASE_ ) heap.node_position.append(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCAmelCase = 0 _UpperCAmelCase = distance heap.heapify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(1 , len(SCREAMING_SNAKE_CASE_ ) ): _UpperCAmelCase = heap.delete_minimum(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCAmelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE_ )] ): _UpperCAmelCase = distance heap.bottom_to_top( SCREAMING_SNAKE_CASE_ , heap.get_position(SCREAMING_SNAKE_CASE_ 
) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                    _UpperCAmelCase = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(A__(adjacency_list))
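# Illustrative worked example (standalone, independent of the interactive I/O
# above): for the triangle 0-1 (w=1), 0-2 (w=3), 1-2 (w=1), the three possible
# spanning trees weigh 2, 4 and 4, so Prim's algorithm keeps the two weight-1
# edges and never the weight-3 edge.
assert min(1 + 1, 1 + 3, 3 + 1) == 2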
32
from typing import List from .keymap import KEYMAP, get_character def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += [key] setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : Any ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += keys setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator class __UpperCamelCase ( A__ ): def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if not hasattr(_UpperCamelCase , '''key_handler''' ): setattr(_UpperCamelCase , '''key_handler''' , {} ) setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): _UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] ) for key in handled_keys: _UpperCAmelCase = value return new_cls @staticmethod def UpperCamelCase( cls ): _UpperCAmelCase = get_character() if char != KEYMAP["undefined"]: _UpperCAmelCase = ord(_UpperCamelCase ) _UpperCAmelCase = cls.key_handler.get(_UpperCamelCase ) if handler: _UpperCAmelCase = char return handler(cls ) else: return None def A__ ( cls : Union[str, Any] ) -> Any: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
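# Illustrative sketch (standalone, not part of the original module): the
# decorators above only accumulate key names in a `handle_key` attribute on the
# function object; the metaclass then copies each function into a
# key -> handler dispatch table. The attribute mechanism in miniature:
def _demo_handler():
    pass


_demo_handler.handle_key = getattr(_demo_handler, "handle_key", []) + ["up", "down"]
assert _demo_handler.handle_key == ["up", "down"]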
32
1
from __future__ import annotations def A__ ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> float: """simple docstring""" if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def A__ ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ) -> float: """simple docstring""" if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def A__ ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ) -> float: """simple docstring""" if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( SCREAMING_SNAKE_CASE_ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
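# Illustrative worked example (standalone arithmetic check of the
# simple-interest formula above): 500 units of principal at a 0.1% daily rate
# over 100 days earns principal * rate * days = 50.
assert 500.0 * 0.001 * 100 == 50.0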
32
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = scope _UpperCAmelCase = range_bbox def UpperCamelCase( self ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _UpperCAmelCase = bbox[i, j, 3] _UpperCAmelCase = bbox[i, j, 1] _UpperCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: _UpperCAmelCase = bbox[i, j, 2] _UpperCAmelCase = bbox[i, j, 0] _UpperCAmelCase = t _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase( self ): return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) 
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = LiltModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model( _UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): _UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model( _UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase( self ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ): __A : Dict = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __A : Optional[Any] = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __A : List[Any] = False __A : Optional[int] = False def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return True def UpperCamelCase( self ): _UpperCAmelCase = LiltModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 ) def UpperCamelCase( self ): self.config_tester.run_common_tests() def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() 
for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase ) @slow def UpperCamelCase( self ): for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @require_torch @slow class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): _UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase ) _UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase ) _UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase ) _UpperCAmelCase = torch.Size([1, 2, 768] ) _UpperCAmelCase = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , ) self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) )
32
1
UpperCAmelCase_ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} UpperCAmelCase_ = ["a", "b", "c", "d", "e"] def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> Any: """simple docstring""" _UpperCAmelCase = start # add current to visited visited.append(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: _UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # if all neighbors visited add current to sort sort.append(SCREAMING_SNAKE_CASE_ ) # if all vertices haven't been visited select a new one to visit if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): for vertice in vertices: if vertice not in visited: _UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # return sort return sort if __name__ == "__main__": UpperCAmelCase_ = topological_sort("a", [], []) print(sort)
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", } class __UpperCamelCase ( A__ ): __A : Tuple = """rwkv""" __A : Any = {"""max_position_embeddings""": """context_length"""} def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = context_length _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size _UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = rescale_every _UpperCAmelCase = use_cache _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id super().__init__( tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
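# Illustrative note (standalone check): when attention_hidden_size and
# intermediate_size are left as None above, they fall back to hidden_size and
# 4 * hidden_size -- 1024 and 4096 for the default config.
assert (1024, 4 * 1024) == (1024, 4096)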
32
1
def A__ ( n : int , r : int ) -> int:
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(A__(n=10, r=5))
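# Illustrative sanity check (standalone): C(10, 5) = 252 and C(5, 2) = 10,
# matching the Pascal's-triangle row update performed above.
assert A__(n=10, r=5) == 252
assert A__(n=5, r=2) == 10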
32
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str: """simple docstring""" if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b" _UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
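# Illustrative note (standalone): the zero-filled string construction above is
# just a textual bitwise AND; e.g. 0b1010 & 0b0110 == 0b0010.
assert 0b1010 & 0b0110 == 0b0010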
32
1
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCAmelCase_ = logging.get_logger(__name__) def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> int: """simple docstring""" _UpperCAmelCase = R'''\w+[.]\d+''' _UpperCAmelCase = re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for pat in pats: _UpperCAmelCase = key.replace(SCREAMING_SNAKE_CASE_ , '''_'''.join(pat.split('''.''' ) ) ) return key def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): _UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: _UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: _UpperCAmelCase = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer _UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: _UpperCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": _UpperCAmelCase = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _UpperCAmelCase = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _UpperCAmelCase = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict=42 ) -> List[Any]: """simple docstring""" _UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params _UpperCAmelCase = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE_ ) ) _UpperCAmelCase = flatten_dict(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _UpperCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters _UpperCAmelCase , _UpperCAmelCase = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown _UpperCAmelCase = jnp.asarray(SCREAMING_SNAKE_CASE_ ) return unflatten_dict(SCREAMING_SNAKE_CASE_ )
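# Illustrative example (standalone, hypothetical key name): the \w+[.]\d+
# pattern used by the rename helper above matches indexed PyTorch module names,
# and each matched "block.N" then becomes Flax-style "block_N".
import re

assert re.findall(r"\w+[.]\d+", "down_blocks.0.attentions.1.proj") == [
    "down_blocks.0",
    "attentions.1",
]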
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class __UpperCamelCase ( A__ ): __A : Dict = """falcon""" __A : Any = ["""past_key_values"""] def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase ) _UpperCAmelCase = hidden_size if n_embed is None else n_embed _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id _UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase = alibi _UpperCAmelCase = new_decoder_architecture _UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase = parallel_attn _UpperCAmelCase = bias super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) @property def UpperCamelCase( self ): return self.hidden_size // self.num_attention_heads @property def UpperCamelCase( self ): return not self.alibi
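# Illustrative worked example (standalone): the head_dim property above is
# hidden_size // num_attention_heads; with the 7B-style defaults that is
# 4544 // 71 = 64.
assert 4544 // 71 == 64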
32
1
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[int]:
    """simple docstring"""
    _UpperCAmelCase = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )


def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> int:
    """simple docstring"""
    _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
    _UpperCAmelCase = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
    _UpperCAmelCase = emb.weight.data
    return lin_layer


def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
    """simple docstring"""
    _UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
    _UpperCAmelCase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    _UpperCAmelCase = mam_aaa['''model''']
    remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
    _UpperCAmelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    _UpperCAmelCase = MaMaaaConfig(
        vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    _UpperCAmelCase = state_dict['''decoder.embed_tokens.weight''']
    _UpperCAmelCase = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
    model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
    _UpperCAmelCase = make_linear_from_emb(model.model.shared )
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = A__(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
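# Illustrative usage note (hypothetical paths, not from the original file):
#     python convert_m2m100_checkpoint.py /path/to/m2m100.pt ./m2m100-hf
# i.e. the two positional arguments declared by the parser above.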
32
from math import sqrt def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int: """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = 1 while count != nth and number < 3: number += 1 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 while count != nth: number += 2 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 return number if __name__ == "__main__": print(f'''{solution() = }''')
32
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class __UpperCamelCase ( A__ ): __A : Union[str, Any] = """wavlm""" def __init__( self , _UpperCamelCase=32 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=3072 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-5 , _UpperCamelCase="group" , _UpperCamelCase="gelu" , _UpperCamelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCamelCase=False , _UpperCamelCase=128 , _UpperCamelCase=16 , _UpperCamelCase=320 , _UpperCamelCase=800 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=0.05 , _UpperCamelCase=10 , _UpperCamelCase=2 , _UpperCamelCase=0.0 , _UpperCamelCase=10 , _UpperCamelCase=320 , _UpperCamelCase=2 , _UpperCamelCase=0.1 , _UpperCamelCase=100 , _UpperCamelCase=256 , _UpperCamelCase=256 , _UpperCamelCase=0.1 , _UpperCamelCase="mean" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=256 , _UpperCamelCase=(512, 512, 512, 512, 1500) , _UpperCamelCase=(5, 3, 3, 1, 1) , _UpperCamelCase=(1, 2, 3, 1, 1) , _UpperCamelCase=512 , _UpperCamelCase=80 , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=2 , _UpperCamelCase=False , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=None , **_UpperCamelCase , ): super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_norm _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(_UpperCamelCase ) _UpperCAmelCase = list(_UpperCamelCase ) _UpperCAmelCase = list(_UpperCamelCase ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_buckets _UpperCAmelCase = max_bucket_distance _UpperCAmelCase = num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layerdrop _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = num_ctc_classes _UpperCAmelCase = vocab_size _UpperCAmelCase = do_stable_layer_norm _UpperCAmelCase = use_weighted_layer_sum _UpperCAmelCase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = apply_spec_augment _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length # parameters for pretraining with codevector quantized representations _UpperCAmelCase = num_codevectors_per_group _UpperCAmelCase = num_codevector_groups _UpperCAmelCase = contrastive_logits_temperature _UpperCAmelCase = num_negatives _UpperCAmelCase = codevector_dim _UpperCAmelCase = proj_codevector_dim _UpperCAmelCase = diversity_loss_weight # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # adapter _UpperCAmelCase = add_adapter _UpperCAmelCase = adapter_kernel_size _UpperCAmelCase = adapter_stride _UpperCAmelCase = num_adapter_layers _UpperCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _UpperCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _UpperCAmelCase = list(_UpperCamelCase ) _UpperCAmelCase = list(_UpperCamelCase ) _UpperCAmelCase = list(_UpperCamelCase ) _UpperCAmelCase = xvector_output_dim @property def UpperCamelCase( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
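# Illustrative worked example (standalone): the reduce-based property above
# (inputs_to_logits_ratio in the original WavLMConfig) multiplies the conv
# strides together; for the default (5, 2, 2, 2, 2, 2, 2) that is
# 5 * 2**6 = 320 input samples per output frame (20 ms at 16 kHz).
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320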
32
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = F'''Input value of [number={number}] must be an integer''' raise TypeError(SCREAMING_SNAKE_CASE_ ) if number < 0: return False _UpperCAmelCase = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
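# Illustrative worked example (standalone): the digit-by-digit loop above
# accepts numbers whose square ends in the number itself (sometimes called
# automorphic numbers). 25**2 == 625 ends in "25"; 7**2 == 49 does not end in "7".
assert str(25**2).endswith("25")
assert not str(7**2).endswith("7")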
32
1
import re import string import numpy as np import datasets UpperCAmelCase_ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" UpperCAmelCase_ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" UpperCAmelCase_ = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def UpperCamelCase( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': 
datasets.Value('''string''' , id='''sequence''' ), } ) , reference_urls=[] , ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=False , ): if regexes_to_ignore is not None: for s in regexes_to_ignore: _UpperCAmelCase = np.array([re.sub(_UpperCamelCase , '''''' , _UpperCamelCase ) for x in predictions] ) _UpperCAmelCase = np.array([re.sub(_UpperCamelCase , '''''' , _UpperCamelCase ) for x in references] ) else: _UpperCAmelCase = np.asarray(_UpperCamelCase ) _UpperCAmelCase = np.asarray(_UpperCamelCase ) if ignore_case: _UpperCAmelCase = np.char.lower(_UpperCamelCase ) _UpperCAmelCase = np.char.lower(_UpperCamelCase ) if ignore_punctuation: _UpperCAmelCase = string.punctuation.maketrans('''''' , '''''' , string.punctuation ) _UpperCAmelCase = np.char.translate(_UpperCamelCase , table=_UpperCamelCase ) _UpperCAmelCase = np.char.translate(_UpperCamelCase , table=_UpperCamelCase ) if ignore_numbers: _UpperCAmelCase = string.digits.maketrans('''''' , '''''' , string.digits ) _UpperCAmelCase = np.char.translate(_UpperCamelCase , table=_UpperCamelCase ) _UpperCAmelCase = np.char.translate(_UpperCamelCase , table=_UpperCamelCase ) _UpperCAmelCase = predictions == references return {"exact_match": np.mean(_UpperCamelCase ) * 100}
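# Illustrative example (standalone sketch): the ignore_case / ignore_punctuation
# paths above reduce to lowercasing plus a punctuation-stripping translation
# table; the same normalization for a single prediction/reference pair:
import string

_norm = lambda s: s.lower().translate(str.maketrans("", "", string.punctuation))
assert _norm("Cat?") == _norm("cat")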
32
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=A__ ) class __UpperCamelCase ( A__ ): __A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __A : ClassVar[Features] = Features({"""text""": Value("""string""" )} ) __A : ClassVar[Features] = Features({} ) __A : str = "text" @property def UpperCamelCase( self ): return {self.text_column: "text"}
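# Illustrative usage note (hypothetical column name; this class mirrors
# datasets' LanguageModeling task template): for a dataset whose text lives in
# a column called "content", the template's column_mapping drives the rename:
#     LanguageModeling(text_column="content").column_mapping == {"content": "text"}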
32
1
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) UpperCAmelCase_ = "\\n Text data.\n Second line of data." UpperCAmelCase_ = "file" @pytest.fixture(scope='''session''' ) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _UpperCAmelCase = bytes(SCREAMING_SNAKE_CASE_ , '''utf-8''' ) with zstd.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return path @pytest.fixture def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: """simple docstring""" with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , '''w''' ) as f: f.write(SCREAMING_SNAKE_CASE_ ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> str: """simple docstring""" _UpperCAmelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _UpperCAmelCase = input_paths[compression_format] _UpperCAmelCase = tmp_path / '''cache''' _UpperCAmelCase = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ ) as f: _UpperCAmelCase = f.read() with open(SCREAMING_SNAKE_CASE_ ) as f: _UpperCAmelCase = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase = '''custom_cache''' _UpperCAmelCase = '''custom_extracted_dir''' _UpperCAmelCase = tmp_path / '''custom_extracted_path''' if default_extracted: _UpperCAmelCase = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , SCREAMING_SNAKE_CASE_ ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(SCREAMING_SNAKE_CASE_ ) ) _UpperCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _UpperCAmelCase = xz_file _UpperCAmelCase = ( DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ ) ) _UpperCAmelCase = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ ) assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected def A__ ( SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() ) assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file # relative path _UpperCAmelCase = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert 
cached_path(SCREAMING_SNAKE_CASE_ ) == text_file def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) # relative path _UpperCAmelCase = '''./__missing_file__.txt''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path(SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: """simple docstring""" _UpperCAmelCase = get_from_cache(F'''tmp://{tmpfs_file}''' ) with open(SCREAMING_SNAKE_CASE_ ) as f: _UpperCAmelCase = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( ) -> Any: """simple docstring""" with pytest.raises(SCREAMING_SNAKE_CASE_ ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_get('''https://huggingface.co''' , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_get('''ftp://huggingface.co''' , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , SCREAMING_SNAKE_CASE_ ) def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]: """simple docstring""" _UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_get('''s3://huggingface.co''' , temp_file=SCREAMING_SNAKE_CASE_ ) with pytest.raises(SCREAMING_SNAKE_CASE_ ): fsspec_head('''s3://huggingface.co''' )
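# Illustrative note (comment only): each @patch("datasets.config.HF_DATASETS_OFFLINE", ...)
# test above flips offline mode on and asserts that the network helpers raise
# OfflineModeIsEnabled instead of touching the network; the mangled
# SCREAMING_SNAKE_CASE_ placeholders stand in for True and for that expected
# exception class in the original test file.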
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"vocab_file": "spiece.model"} UpperCAmelCase_ = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 UpperCAmelCase_ = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } UpperCAmelCase_ = "▁" class __UpperCamelCase ( A__ ): __A : Any = VOCAB_FILES_NAMES __A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Tuple = ["""input_ids""", """attention_mask"""] def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: _UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) if legacy: logger.warning_once( f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' ) _UpperCAmelCase = legacy _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , ) _UpperCAmelCase = vocab_file _UpperCAmelCase = extra_ids _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , ) return max_model_length @property def UpperCamelCase( self ): return self.sp_model.get_piece_size() + self._extra_ids def UpperCamelCase( self ): _UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_UpperCamelCase )) + [1] return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def UpperCamelCase( self ): return list( set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase( self ): return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()] def UpperCamelCase( self , _UpperCamelCase ): if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) if token_ids_a is None: return token_ids_a else: _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None return state def __setstate__( self , _UpperCamelCase ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: _UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' ) return super().tokenize(_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): if not self.legacy: _UpperCAmelCase = text.startswith(_UpperCamelCase ) if is_first: _UpperCAmelCase = text[1:] _UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ): _UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def UpperCamelCase( self , _UpperCamelCase ): if token.startswith('''<extra_id_''' ): _UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase ) _UpperCAmelCase = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): if index < self.sp_model.get_piece_size(): _UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase ) else: _UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>''' return token def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = [] _UpperCAmelCase = '''''' _UpperCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token _UpperCAmelCase = True _UpperCAmelCase = [] else: current_sub_tokens.append(_UpperCamelCase ) _UpperCAmelCase = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: _UpperCAmelCase = 
self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
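# ---------------------------------------------------------------------------
# Illustrative usage: a hedged sketch of the sentinel-token bookkeeping the
# tokenizer above implements. The 100 `<extra_id_*>` tokens occupy the *end*
# of the vocabulary, with `<extra_id_0>` mapped to `vocab_size - 1` (see the
# `<extra_id_(\d+)>` branch of the token-to-id conversion). Loading the public
# "t5-small" checkpoint requires network or local cache access.
def _demo_t5_sentinel_tokens() -> None:
    from transformers import T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    assert tokenizer.convert_tokens_to_ids("<extra_id_0>") == tokenizer.vocab_size - 1
    # EOS ("</s>") is appended automatically when building model inputs.
    input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids
    assert input_ids[-1] == tokenizer.eos_token_id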
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge UpperCAmelCase_ = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] UpperCAmelCase_ = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def A__ ( ) -> Tuple: """simple docstring""" _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ , rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ , rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def A__ ( ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = '''rougeLsum''' _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=[k] )[k] _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=[k] )[k] assert score > score_no_sep def A__ ( ) -> Tuple: """simple docstring""" _UpperCAmelCase = ['''rouge1''', '''rouge2''', '''rougeL'''] _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ , rouge_keys=SCREAMING_SNAKE_CASE_ ) assert score_sep == score_no_sep def A__ ( ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] _UpperCAmelCase = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ ) == calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , newline_sep=SCREAMING_SNAKE_CASE_ ) def A__ ( ) -> int: """simple docstring""" _UpperCAmelCase = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] _UpperCAmelCase = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . 
Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rouge_keys=['''rougeLsum'''] , newline_sep=SCREAMING_SNAKE_CASE_ )['''rougeLsum'''] _UpperCAmelCase = calculate_rouge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def A__ ( ) -> Dict: """simple docstring""" _UpperCAmelCase = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) _UpperCAmelCase = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) ) assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = calculate_rouge_path( data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=SCREAMING_SNAKE_CASE_ ) assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
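# ---------------------------------------------------------------------------
# Illustrative sanity check using the same local `calculate_rouge` helper the
# tests import: scoring a prediction against itself should max out every
# requested ROUGE key (100.0 on the helper's aggregated 0-100 F-measure
# scale, assuming the default bootstrap aggregation).
def _demo_perfect_rouge() -> None:
    preds = ["The cat sat on the mat ."]
    scores = calculate_rouge(preds, preds, rouge_keys=["rouge1", "rouge2", "rougeL"])
    print(scores)  # expected: {"rouge1": 100.0, "rouge2": 100.0, "rougeL": 100.0}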
from __future__ import annotations


def is_9_pandigital(number: int) -> bool:
    """Return True if ``number`` contains each of the digits 1-9 exactly once."""
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Largest 1-9 pandigital number expressible as concat(n, 2n) or concat(n, 2n, 3n)."""
    # Four-digit n with multipliers (1, 2): concat(n, 2n) == 100000 * n + 2 * n == 100002 * n
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # Three-digit n with multipliers (1, 2, 3): concat(n, 2n, 3n) == 1002003 * n
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
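# ---------------------------------------------------------------------------
# Cross-check: a slower but more transparent search than the closed-form loops
# above. Concatenate n*1, n*2, ... until nine digits are produced and keep the
# largest pandigital result; it should agree with solution().
def brute_force_solution() -> int:
    best = 0
    for n in range(1, 10000):
        concatenated = ""
        multiplier = 1
        while len(concatenated) < 9:
            concatenated += str(n * multiplier)
            multiplier += 1
        # require at least the multipliers (1, 2) and exactly nine digits
        if multiplier > 2 and len(concatenated) == 9 and is_9_pandigital(int(concatenated)):
            best = max(best, int(concatenated))
    return best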
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> Any: """simple docstring""" if ( (cp >= 0x4_e00 and cp <= 0x9_fff) or (cp >= 0x3_400 and cp <= 0x4_dbf) # or (cp >= 0x20_000 and cp <= 0x2a_6df) # or (cp >= 0x2a_700 and cp <= 0x2b_73f) # or (cp >= 0x2b_740 and cp <= 0x2b_81f) # or (cp >= 0x2b_820 and cp <= 0x2c_eaf) # or (cp >= 0xf_900 and cp <= 0xf_aff) or (cp >= 0x2f_800 and cp <= 0x2f_a1f) # ): # return True return False def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" for char in word: _UpperCAmelCase = ord(SCREAMING_SNAKE_CASE_ ) if not _is_chinese_char(SCREAMING_SNAKE_CASE_ ): return 0 return 1 def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> int: """simple docstring""" _UpperCAmelCase = set() for token in tokens: _UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) > 1 and is_chinese(SCREAMING_SNAKE_CASE_ ) if chinese_word: word_set.add(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = list(SCREAMING_SNAKE_CASE_ ) return word_list def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : set() ) -> Tuple: """simple docstring""" if not chinese_word_set: return bert_tokens _UpperCAmelCase = max([len(SCREAMING_SNAKE_CASE_ ) for w in chinese_word_set] ) _UpperCAmelCase = bert_tokens _UpperCAmelCase , _UpperCAmelCase = 0, len(SCREAMING_SNAKE_CASE_ ) while start < end: _UpperCAmelCase = True if is_chinese(bert_word[start] ): _UpperCAmelCase = min(end - start , SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ , 1 , -1 ): _UpperCAmelCase = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _UpperCAmelCase = '''##''' + bert_word[j] _UpperCAmelCase = start + i _UpperCAmelCase = False break if single_word: start += 1 return bert_word def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : LTP , SCREAMING_SNAKE_CASE_ : BertTokenizer ) -> str: """simple docstring""" _UpperCAmelCase = [] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 1_00 ): _UpperCAmelCase = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=['''cws'''] ).cws _UpperCAmelCase = [get_chinese_word(SCREAMING_SNAKE_CASE_ ) for r in res] ltp_res.extend(SCREAMING_SNAKE_CASE_ ) assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 1_00 ): _UpperCAmelCase = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=5_12 ) bert_res.extend(res['''input_ids'''] ) assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [] for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = [] for id in input_ids: _UpperCAmelCase = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) input_tokens.append(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = add_sub_symbol(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ): if token[:2] == "##": _UpperCAmelCase = token[2:] # save chinese tokens' pos if len(SCREAMING_SNAKE_CASE_ ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE_ ) ): ref_id.append(SCREAMING_SNAKE_CASE_ ) ref_ids.append(SCREAMING_SNAKE_CASE_ ) assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) return ref_ids def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict: """simple docstring""" with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: _UpperCAmelCase = f.readlines() _UpperCAmelCase = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _UpperCAmelCase = LTP(args.ltp ) # faster in GPU device _UpperCAmelCase = BertTokenizer.from_pretrained(args.bert ) _UpperCAmelCase = prepare_ref(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: _UpperCAmelCase = [json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' for ref in ref_ids] f.writelines(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) UpperCAmelCase_ = parser.parse_args() main(args)
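# ---------------------------------------------------------------------------
# Illustrative example of the `add_sub_symbol` helper, written against the
# upstream name used at its call site in `prepare_ref` above (the
# placeholder-renamed defs would need their original names restored for this
# to run). Every non-initial character of a segmenter-produced multi-character
# Chinese word gets a "##" prefix, so whole-word masking can mask the word as
# one unit.
def _demo_add_sub_symbol() -> None:
    # e.g. LTP segments the sentence into the whole words {"喜欢", "中国"}
    bert_tokens = ["我", "喜", "欢", "中", "国"]
    assert add_sub_symbol(bert_tokens, {"喜欢", "中国"}) == ["我", "喜", "##欢", "中", "##国"]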
import numpy as np


def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit: x for x > 0, otherwise alpha * (exp(x) - 1)."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
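# ---------------------------------------------------------------------------
# Worked example: positive inputs pass through unchanged; negative inputs map
# to alpha * (exp(x) - 1), which saturates at -alpha for very negative x.
def _demo_elu() -> None:
    out = elu(np.array([2.0, 0.0, -1.0]), alpha=1.0)
    # expected: [2.0, 0.0, exp(-1) - 1 ~= -0.6321]
    assert np.allclose(out, [2.0, 0.0, np.exp(-1.0) - 1.0])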
from manim import * class __UpperCamelCase ( A__ ): def UpperCamelCase( self ): _UpperCAmelCase = Rectangle(height=0.5 , width=0.5 ) _UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _UpperCAmelCase = Rectangle(height=0.25 , width=0.25 ) _UpperCAmelCase = [mem.copy() for i in range(6 )] _UpperCAmelCase = [mem.copy() for i in range(6 )] _UpperCAmelCase = VGroup(*_UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = VGroup(*_UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = VGroup(_UpperCamelCase , _UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = Text('''CPU''' , font_size=24 ) _UpperCAmelCase = Group(_UpperCamelCase , _UpperCamelCase ).arrange(_UpperCamelCase , buff=0.5 , aligned_edge=_UpperCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCamelCase ) _UpperCAmelCase = [mem.copy() for i in range(4 )] _UpperCAmelCase = VGroup(*_UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = Text('''GPU''' , font_size=24 ) _UpperCAmelCase = Group(_UpperCamelCase , _UpperCamelCase ).arrange(_UpperCamelCase , buff=0.5 , aligned_edge=_UpperCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCamelCase ) _UpperCAmelCase = [mem.copy() for i in range(6 )] _UpperCAmelCase = VGroup(*_UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = Text('''Model''' , font_size=24 ) _UpperCAmelCase = Group(_UpperCamelCase , _UpperCamelCase ).arrange(_UpperCamelCase , buff=0.5 , aligned_edge=_UpperCamelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCamelCase ) _UpperCAmelCase = [] _UpperCAmelCase = [] for i, rect in enumerate(_UpperCamelCase ): _UpperCAmelCase = fill.copy().set_fill(_UpperCamelCase , opacity=0.8 ) target.move_to(_UpperCamelCase ) model_arr.append(_UpperCamelCase ) _UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCamelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCamelCase ) self.add(*_UpperCamelCase , *_UpperCamelCase ) _UpperCAmelCase = [meta_mem.copy() for i in range(6 )] _UpperCAmelCase = [meta_mem.copy() for i in range(6 )] _UpperCAmelCase = VGroup(*_UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = VGroup(*_UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = VGroup(_UpperCamelCase , _UpperCamelCase ).arrange(_UpperCamelCase , buff=0 ) _UpperCAmelCase = Text('''Disk''' , font_size=24 ) _UpperCAmelCase = Group(_UpperCamelCase , _UpperCamelCase ).arrange(_UpperCamelCase , buff=0.5 , aligned_edge=_UpperCamelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCamelCase , _UpperCamelCase ) _UpperCAmelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _UpperCAmelCase = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCamelCase , _UpperCamelCase ) _UpperCAmelCase = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCamelCase ) _UpperCAmelCase = MarkupText( f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCamelCase ) ) _UpperCAmelCase = Square(0.3 ) input.set_fill(_UpperCamelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCamelCase , buff=0.5 ) 
self.play(Write(_UpperCamelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCamelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCamelCase ) ) self.play(FadeOut(_UpperCamelCase ) ) _UpperCAmelCase = Arrow(start=_UpperCamelCase , end=_UpperCamelCase , color=_UpperCamelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCamelCase , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) _UpperCAmelCase = MarkupText( f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCamelCase , run_time=3 ) ) _UpperCAmelCase = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02} self.play( Write(_UpperCamelCase ) , Circumscribe(model_arr[0] , color=_UpperCamelCase , **_UpperCamelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCamelCase , **_UpperCamelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCamelCase , **_UpperCamelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _UpperCAmelCase = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCamelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _UpperCAmelCase = AnimationGroup( FadeOut(_UpperCamelCase , run_time=0.5 ) , MoveToTarget(_UpperCamelCase , run_time=0.5 ) , FadeIn(_UpperCamelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCamelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _UpperCAmelCase = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCamelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCamelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCamelCase , **_UpperCamelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCamelCase , **_UpperCamelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCamelCase , **_UpperCamelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCamelCase , **_UpperCamelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCamelCase , **_UpperCamelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCamelCase , **_UpperCamelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _UpperCAmelCase = a_c _UpperCAmelCase = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCamelCase ) , FadeOut(_UpperCamelCase , run_time=0.5 ) , ) _UpperCAmelCase = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCamelCase , run_time=3 ) , MoveToTarget(_UpperCamelCase ) ) self.wait()
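# ---------------------------------------------------------------------------
# Rendering note (an assumption about the toolchain): the `from manim import *`
# style suggests Manim Community Edition, where the scene above can be
# rendered from the command line with, e.g.:
#
#   manim -pql big_model_inference.py <SceneClassName>
#
# -p previews the finished video and -ql selects fast low-quality output; the
# file name and scene class name are placeholders for however this module is
# saved.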
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert each value at the head in descending order, so the final
        # list reads in ascending order.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
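# ---------------------------------------------------------------------------
# Example run: merging the two sample tuples yields one ascending list of all
# sixteen elements (duplicates such as 0 and 3 are kept).
def _demo_merge_lists() -> None:
    merged = merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even))
    assert len(merged) == len(test_data_odd) + len(test_data_even)
    assert str(merged) == (
        "-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10"
    )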
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Any = DanceDiffusionPipeline __A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __A : Tuple = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } __A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __A : List[str] = False __A : str = False def UpperCamelCase( self ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) _UpperCAmelCase = IPNDMScheduler() _UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ): if str(_UpperCamelCase ).startswith('''mps''' ): _UpperCAmelCase = torch.manual_seed(_UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) _UpperCAmelCase = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def UpperCamelCase( self ): _UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase ) _UpperCAmelCase = pipe(**_UpperCamelCase ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) _UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCamelCase( self ): return super().test_save_load_local() @skip_mps def UpperCamelCase( self ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def UpperCamelCase( self ): return super().test_save_load_optional_components() @skip_mps def UpperCamelCase( self ): return super().test_attention_slicing_forward_pass() def UpperCamelCase( self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) 
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
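# ---------------------------------------------------------------------------
# Illustrative usage: a hedged, minimal inference sketch matching what the
# slow tests above check. The pipeline is unconditional, so it only needs a
# generator, a step count and a target audio length. Running it requires the
# public "harmonai/maestro-150k" checkpoint and, for reasonable speed, a GPU.
def _demo_dance_diffusion() -> None:
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    pipe = pipe.to(torch_device)
    generator = torch.manual_seed(0)
    audios = pipe(generator=generator, num_inference_steps=50, audio_length_in_s=4.096).audios
    print(audios.shape)  # (batch, channels, samples) == (1, 2, pipe.unet.sample_size)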
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __UpperCamelCase ( A__ ): __A : Optional[int] = ["""pixel_values"""] def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , **_UpperCamelCase , ): super().__init__(**_UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''shortest_edge''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD _UpperCAmelCase = do_convert_rgb def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) _UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''size''' , default_to_square=_UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase ) _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _UpperCAmelCase = make_list_of_images(_UpperCamelCase ) if not valid_images(_UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _UpperCAmelCase = [convert_to_rgb(_UpperCamelCase ) for image in images] # All transformations expect numpy arrays. 
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
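# ---------------------------------------------------------------------------
# Illustrative usage: the class above appears to mirror
# `transformers.CLIPImageProcessor` with its method names collapsed to
# placeholders. Under the upstream name, the preprocessing it implements
# (resize shorter edge to 224, center-crop to 224x224, rescale to [0, 1],
# normalize with the OpenAI CLIP statistics) is used like this; the input
# image is synthetic.
def _demo_clip_image_processor() -> None:
    import numpy as np
    from transformers import CLIPImageProcessor

    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    processor = CLIPImageProcessor()  # defaults match the values above
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)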
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ ("vision-encoder-decoder", 
"FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A 
: Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
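# ---------------------------------------------------------------------------
# Illustrative usage: a hedged sketch of how the lazy mappings above are
# consumed. The auto classes dispatch on the checkpoint's config type, so a
# BERT checkpoint resolves to FlaxBertForSequenceClassification. Requires
# `flax`/`jax` plus network or cache access to the public "bert-base-cased"
# checkpoint; the classification head is randomly initialized and warns as
# such.
def _demo_flax_auto_dispatch() -> None:
    from transformers import FlaxAutoModelForSequenceClassification

    model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
    print(type(model).__name__)  # expected: FlaxBertForSequenceClassification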
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"  # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)

CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

README_CORRECT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

README_CORRECT_FOUR_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

README_EMPTY_YAML = "\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = "\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = "\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

README_MISSING_TEXT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"

EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"

EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

README_MISSING_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""

EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

README_MULTIPLE_SAME_HEADING_1 = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
32
import base64


def base85_encode(string: str) -> bytes:
    """Base85 (b85) encode a UTF-8 string."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    """Decode a Base85 (b85) byte string back to UTF-8 text."""
    return base64.b85decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
32
1
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of a number.
    If digit_amount > 0, round to that many decimal places; otherwise return
    the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
32
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
32
1
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load the base model in diffusers format
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
32
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
32
1
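A minimal sketch, not part of the dataset row above, of how the dummy-object pattern in the torch/scipy snippet behaves when the backends are missing; the try/except and printed message are illustrative assumptions.

# Hypothetical usage of the DummyObject pattern shown above. Instantiating
# the placeholder raises ImportError immediately, instead of failing deep
# inside the library when torch/scipy are absent.
try:
    scheduler = LMSDiscreteScheduler()  # requires_backends fires in __init__
except ImportError as err:
    print(f"Install the missing backends first: {err}")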
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    """Disable gradient updates for every parameter of the module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
32
def solution(n: int = 2_00_00_00) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i

    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
32
1
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
32
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
32
1
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    """Download a short test video and return it as a list of frames."""
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
32
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
32
1
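A short usage sketch for the processor snippet above; the checkpoint name and the blank test image are assumptions for illustration.

# Hedged usage sketch for the BridgeTower processor above.
from PIL import Image

from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.new("RGB", (288, 288))  # placeholder image
inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
# tokenizer fields plus pixel_values / pixel_mask from the image processor
print(sorted(inputs.keys()))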
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
32
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
32
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
32
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and call the handler registered for the next pressed key, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild the class with the KeyHandler metaclass."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
32
1
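A sketch of how the mark/register helpers in the key-handler snippet above are meant to be combined; the Menu class and key choices are hypothetical.

# Hypothetical menu built with the helpers above: @mark tags the methods,
# register() rebuilds the class with the KeyHandler metaclass so the tagged
# methods land in the class-level key_handler table.
@register
class Menu:
    @mark("j")
    def move_down(cls):
        print("down")

    @mark("k")
    def move_up(cls):
        print("up")


# Menu.handle_input(Menu) blocks on get_character() and dispatches "j"/"k"
# to the matching handler.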
def solution(n: int = 2_00_00_00) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i

    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
32
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
32
1
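A minimal shape sketch for the text-plus-layout inputs exercised in the test row above. It assumes only that torch is installed; the names and sizes are illustrative, not the LiltModel API.

# Hedged sketch: input_ids is (batch, seq_len) and bbox carries a 4-value
# bounding box per token, as in the integration test above. No model is run.
import torch

batch_size, seq_len, vocab_size = 2, 7, 99
input_ids = torch.randint(0, vocab_size, (batch_size, seq_len))
bbox = torch.randint(0, 1000, (batch_size, seq_len, 4))  # x0, y0, x1, y1 per token
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)

assert input_ids.shape == (batch_size, seq_len)
assert bbox.shape == (batch_size, seq_len, 4)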
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(A__ ) , """Tatoeba directory does not exist.""" )
class __UpperCamelCase ( unittest.TestCase ):
    @cached_property
    def UpperCamelCase( self ):
        _UpperCAmelCase = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=_UpperCamelCase )

    @slow
    def UpperCamelCase( self ):
        self.resolver.convert_models(['''heb-eng'''] )

    @slow
    def UpperCamelCase( self ):
        _UpperCAmelCase , _UpperCAmelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_UpperCamelCase )
        assert mmeta["long_pair"] == "heb-eng"
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", } class __UpperCamelCase ( A__ ): __A : Tuple = """rwkv""" __A : Any = {"""max_position_embeddings""": """context_length"""} def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = context_length _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size _UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = rescale_every _UpperCAmelCase = use_cache _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id super().__init__( tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
32
1
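A tiny illustration of the configuration pattern in the row above: constructor arguments are stored as attributes, and an optional size falls back to a multiple of the hidden size when left as None. TinyConfig is a hypothetical name, not the transformers API.

# Hedged sketch of the config-class pattern, assuming nothing beyond stdlib.
class TinyConfig:
    def __init__(self, hidden_size=1024, intermediate_size=None, use_cache=True):
        self.hidden_size = hidden_size
        # derived default, mirroring "4 * hidden_size if intermediate_size is None"
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.use_cache = use_cache

cfg = TinyConfig(hidden_size=512)
assert cfg.intermediate_size == 2048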
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger UpperCAmelCase_ = get_logger(__name__) class __UpperCamelCase : def __init__( self , _UpperCamelCase = None ): _UpperCAmelCase = ( os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) _UpperCAmelCase = Extractor def UpperCamelCase( self , _UpperCamelCase ): from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" _UpperCAmelCase = os.path.abspath(_UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): return force_extract or ( not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase )) ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = False ): _UpperCAmelCase = self.extractor.infer_extractor_format(_UpperCamelCase ) if not extractor_format: return input_path _UpperCAmelCase = self._get_output_path(_UpperCamelCase ) if self._do_extract(_UpperCamelCase , _UpperCamelCase ): self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return output_path class __UpperCamelCase ( A__ ): @classmethod @abstractmethod def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ): ... @staticmethod @abstractmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): ... class __UpperCamelCase ( A__ , A__ ): __A : List[bytes] = [] @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): with open(_UpperCamelCase , '''rb''' ) as f: return f.read(_UpperCamelCase ) @classmethod def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase = b"" ): if not magic_number: _UpperCAmelCase = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: _UpperCAmelCase = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase ) except OSError: return False return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class __UpperCamelCase ( A__ ): @classmethod def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ): return tarfile.is_tarfile(_UpperCamelCase ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): def resolved(_UpperCamelCase ) -> str: return os.path.realpath(os.path.abspath(_UpperCamelCase ) ) def badpath(_UpperCamelCase , _UpperCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase ) def badlink(_UpperCamelCase , _UpperCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link _UpperCAmelCase = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=_UpperCamelCase ) _UpperCAmelCase = resolved(_UpperCamelCase ) for finfo in members: if badpath(finfo.name , _UpperCamelCase ): logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ): logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and 
badlink(_UpperCamelCase , _UpperCamelCase ): logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) _UpperCAmelCase = tarfile.open(_UpperCamelCase ) tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) ) tar_file.close() class __UpperCamelCase ( A__ ): __A : int = [b"""\x1F\x8B"""] @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): with gzip.open(_UpperCamelCase , '''rb''' ) as gzip_file: with open(_UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase ) class __UpperCamelCase ( A__ ): __A : Optional[Any] = [ b"""PK\x03\x04""", b"""PK\x05\x06""", # empty archive b"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase = b"" ): if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(_UpperCamelCase , '''rb''' ) as fp: _UpperCAmelCase = _EndRecData(_UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: _UpperCAmelCase = fp.read(_UpperCamelCase ) # CD is where we expect it to be if len(_UpperCamelCase ) == sizeCentralDir: _UpperCAmelCase = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) with zipfile.ZipFile(_UpperCamelCase , '''r''' ) as zip_file: zip_file.extractall(_UpperCamelCase ) zip_file.close() class __UpperCamelCase ( A__ ): __A : str = [b"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): with lzma.open(_UpperCamelCase ) as compressed_file: with open(_UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase ) class __UpperCamelCase ( A__ ): __A : str = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): if not config.RARFILE_AVAILABLE: raise ImportError('''Please pip install rarfile''' ) import rarfile os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) _UpperCAmelCase = rarfile.RarFile(_UpperCamelCase ) rf.extractall(_UpperCamelCase ) rf.close() class __UpperCamelCase ( A__ ): __A : Any = [b"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): if not config.ZSTANDARD_AVAILABLE: raise ImportError('''Please pip install 
zstandard''' ) import zstandard as zstd _UpperCAmelCase = zstd.ZstdDecompressor() with open(_UpperCamelCase , '''rb''' ) as ifh, open(_UpperCamelCase , '''wb''' ) as ofh: dctx.copy_stream(_UpperCamelCase , _UpperCamelCase ) class __UpperCamelCase ( A__ ): __A : Tuple = [b"""\x42\x5A\x68"""] @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): with bza.open(_UpperCamelCase , '''rb''' ) as compressed_file: with open(_UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase ) class __UpperCamelCase ( A__ ): __A : Union[str, Any] = [b"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): if not config.PY7ZR_AVAILABLE: raise ImportError('''Please pip install py7zr''' ) import pyazr os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) with pyazr.SevenZipFile(_UpperCamelCase , '''r''' ) as archive: archive.extractall(_UpperCamelCase ) class __UpperCamelCase ( A__ ): __A : Optional[Any] = [b"""\x04\x22\x4D\x18"""] @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): if not config.LZ4_AVAILABLE: raise ImportError('''Please pip install lz4''' ) import lza.frame with lza.frame.open(_UpperCamelCase , '''rb''' ) as compressed_file: with open(_UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase ) class __UpperCamelCase : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) __A : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCamelCase( cls ): return max( len(_UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(_UpperCamelCase , _UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): try: return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase ) except OSError: return b"" @classmethod def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase = False ): warnings.warn( '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
''' '''Use \'infer_extractor_format\' instead.''' , category=_UpperCamelCase , ) _UpperCAmelCase = cls.infer_extractor_format(_UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCamelCase( cls , _UpperCamelCase ): # <Added version="2.4.0"/> _UpperCAmelCase = cls._get_magic_number_max_length() _UpperCAmelCase = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ): return extractor_format @classmethod def UpperCamelCase( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = "deprecated" , ): os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase ) # Prevent parallel extractions _UpperCAmelCase = str(Path(_UpperCamelCase ).with_suffix('''.lock''' ) ) with FileLock(_UpperCamelCase ): shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg warnings.warn( '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'extractor_format\' instead.''' , category=_UpperCamelCase , ) _UpperCAmelCase = extractor if extractor != '''deprecated''' else extractor_format else: _UpperCAmelCase = cls.extractors[extractor_format] return extractor.extract(_UpperCamelCase , _UpperCamelCase ) else: warnings.warn( '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ''' '''exception in 3.0.0.''' , category=_UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(_UpperCamelCase ): return extractor.extract(_UpperCamelCase , _UpperCamelCase )
32
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]  # remove the leading "0b"
    _UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]  # remove the leading "0b"
    _UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1''' ) )
        for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
1
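A self-contained sketch of the magic-number detection that the extractor classes above rely on: read the first bytes of a file and match them against known signatures. The signatures are real (gzip, zip, bz2); the sniff helper is illustrative, not the datasets API.

# Hedged sketch: file-type sniffing by magic number, stdlib only.
import gzip
import os
import tempfile

MAGIC = {"gzip": b"\x1f\x8b", "zip": b"PK\x03\x04", "bz2": b"BZh"}

def sniff(path, magic=MAGIC):
    # read just enough bytes to cover the longest known signature
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in magic.values()))
    for fmt, m in magic.items():
        if head.startswith(m):
            return fmt
    return None

path = os.path.join(tempfile.mkdtemp(), "x.gz")
with gzip.open(path, "wb") as f:
    f.write(b"payload")
assert sniff(path) == "gzip"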
import sys

UpperCAmelCase_ = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def A__ ( SCREAMING_SNAKE_CASE_ : str = N ) -> int:
    """simple docstring"""
    _UpperCAmelCase = -sys.maxsize - 1
    for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ):
        _UpperCAmelCase = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            _UpperCAmelCase = product
    return largest_product


if __name__ == "__main__":
    print(f'''{solution() = }''')
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class __UpperCamelCase ( A__ ): __A : Dict = """falcon""" __A : Any = ["""past_key_values"""] def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase ) _UpperCAmelCase = hidden_size if n_embed is None else n_embed _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id _UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase = alibi _UpperCAmelCase = new_decoder_architecture _UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase = parallel_attn _UpperCAmelCase = bias super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) @property def UpperCamelCase( self ): return self.hidden_size // self.num_attention_heads @property def UpperCamelCase( self ): return not self.alibi
32
1
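The window-product search above re-multiplies 13 digits at every position. A possible O(n) variant, sketched below under the assumption that a rolling product plus a zero counter is acceptable, slides the window instead; the function name and test string are made up for illustration.

# Hedged sketch: rolling k-digit window product with explicit zero tracking.
def max_window_product(digits, k=13):
    product, zeros, best = 1, 0, 0
    for i, ch in enumerate(digits):
        d = int(ch)
        if d == 0:
            zeros += 1          # zeros are counted, never multiplied in
        else:
            product *= d
        if i >= k:              # slide: remove the digit leaving the window
            out = int(digits[i - k])
            if out == 0:
                zeros -= 1
            else:
                product //= out  # exact, since `out` was multiplied in earlier
        if i >= k - 1 and zeros == 0:
            best = max(best, product)
    return best

assert max_window_product("9989000999", k=3) == 729  # best window is "999"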
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    _UpperCAmelCase , _UpperCAmelCase = head.next, head
    while fast and fast.next:
        _UpperCAmelCase = fast.next.next
        _UpperCAmelCase = slow.next
    _UpperCAmelCase = slow.next
    _UpperCAmelCase = None  # Don't forget here! But forget still works!
    # reverse the second part
    _UpperCAmelCase = None
    while second:
        _UpperCAmelCase = second.next
        _UpperCAmelCase = node
        _UpperCAmelCase = second
        _UpperCAmelCase = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        _UpperCAmelCase = node.next
        _UpperCAmelCase = head.next
    return True


def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[int]:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    _UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = head
    while fast and fast.next:
        _UpperCAmelCase , _UpperCAmelCase = fast.next.next, slow.next
    # 2. Push the second half into the stack
    _UpperCAmelCase = [slow.val]
    while slow.next:
        _UpperCAmelCase = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        _UpperCAmelCase = cur.next
    return True


def A__ ( SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
    """simple docstring"""
    if not head or not head.next:
        return True
    _UpperCAmelCase = {}
    _UpperCAmelCase = 0
    while head:
        if head.val in d:
            d[head.val].append(SCREAMING_SNAKE_CASE_ )
        else:
            _UpperCAmelCase = [pos]
        _UpperCAmelCase = head.next
        pos += 1
    _UpperCAmelCase = pos - 1
    _UpperCAmelCase = 0
    for v in d.values():
        if len(SCREAMING_SNAKE_CASE_ ) % 2 != 0:
            middle += 1
        else:
            _UpperCAmelCase = 0
            for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
                if v[i] + v[len(SCREAMING_SNAKE_CASE_ ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
32
from math import sqrt def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int: """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = 1 while count != nth and number < 3: number += 1 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 while count != nth: number += 2 if is_prime(SCREAMING_SNAKE_CASE_ ): count += 1 return number if __name__ == "__main__": print(f'''{solution() = }''')
32
1
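The palindrome functions above assume a node type they never define. A self-contained sketch with a minimal node class and the simplest list-copy variant of the check (the in-place reversal and stack versions above are equivalent in result):

# Hedged sketch: minimal singly linked list plus a palindrome check.
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    head = Node(values[0])
    cur = head
    for v in values[1:]:
        cur.next = Node(v)
        cur = cur.next
    return head

def is_palindrome(head):
    vals = []
    cur = head
    while cur:
        vals.append(cur.val)
        cur = cur.next
    return vals == vals[::-1]

assert is_palindrome(build([1, 2, 2, 1]))
assert not is_palindrome(build([1, 2, 3]))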
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
32
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """simple docstring"""
    if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        _UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(SCREAMING_SNAKE_CASE_ )
    if number < 0:
        return False
    _UpperCAmelCase = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
1
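The digit-matching loop above recognizes automorphic numbers, i.e. n whose square ends in n. A sketch reusing the same comparison over a range (the helper name is illustrative):

# Hedged sketch: enumerate automorphic numbers below 1000.
def ends_in_itself(n):
    square = n * n
    while n > 0:
        if n % 10 != square % 10:
            return False
        n //= 10
        square //= 10
    return True

assert [n for n in range(1, 1000) if ends_in_itself(n)] == [1, 5, 6, 25, 76, 376, 625]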
import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def A__ ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list ) -> float: """simple docstring""" _UpperCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(SCREAMING_SNAKE_CASE_ )] ) _UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , SCREAMING_SNAKE_CASE_ ) ) , x.transpose() ) , SCREAMING_SNAKE_CASE_ ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def A__ ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list ) -> float: """simple docstring""" _UpperCAmelCase = (1, 2, 1) _UpperCAmelCase = (1, 1, 0, 7) _UpperCAmelCase = SARIMAX( SCREAMING_SNAKE_CASE_ , exog=SCREAMING_SNAKE_CASE_ , order=SCREAMING_SNAKE_CASE_ , seasonal_order=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = model.fit(disp=SCREAMING_SNAKE_CASE_ , maxiter=6_00 , method='''nm''' ) _UpperCAmelCase = model_fit.predict(1 , len(SCREAMING_SNAKE_CASE_ ) , exog=[test_match] ) return result[0] def A__ ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list ) -> float: """simple docstring""" _UpperCAmelCase = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = regressor.predict(SCREAMING_SNAKE_CASE_ ) return y_pred[0] def A__ ( SCREAMING_SNAKE_CASE_ : list ) -> float: """simple docstring""" train_user.sort() _UpperCAmelCase = np.percentile(SCREAMING_SNAKE_CASE_ , 25 ) _UpperCAmelCase = np.percentile(SCREAMING_SNAKE_CASE_ , 75 ) _UpperCAmelCase = qa - qa _UpperCAmelCase = qa - (iqr * 0.1) return low_lim def A__ ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : float ) -> bool: """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = 0 for i in list_vote: if i > actual_result: _UpperCAmelCase = not_safe + 1 else: if abs(abs(SCREAMING_SNAKE_CASE_ ) - abs(SCREAMING_SNAKE_CASE_ ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) UpperCAmelCase_ = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]] UpperCAmelCase_ = pd.DataFrame( data_input, columns=["total_user", "total_even", "days"] ) UpperCAmelCase_ = Normalizer().fit_transform(data_input_df.values) # split data UpperCAmelCase_ = normalize_df[:, 2].tolist() UpperCAmelCase_ = normalize_df[:, 0].tolist() UpperCAmelCase_ = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) UpperCAmelCase_ = normalize_df[:, [1, 2]].tolist() UpperCAmelCase_ = x[: len(x) - 1] UpperCAmelCase_ = x[len(x) - 1 :] # for linear regression & sarimax UpperCAmelCase_ = total_date[: len(total_date) - 1] UpperCAmelCase_ = total_user[: len(total_user) - 1] UpperCAmelCase_ = total_match[: len(total_match) - 1] UpperCAmelCase_ = total_date[len(total_date) - 1 :] UpperCAmelCase_ = total_user[len(total_user) - 1 :] UpperCAmelCase_ = total_match[len(total_match) - 1 :] # voting system with forecasting UpperCAmelCase_ = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data 
UpperCAmelCase_ = "" if data_safety_checker(res_vote, tst_user) else "not " print("Today's data is {not_str}safe.")
32
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
    __A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    __A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    __A : ClassVar[Features] = Features({} )
    __A : str = "text"

    @property
    def UpperCamelCase( self ):
        return {self.text_column: "text"}
32
1
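The linear_regression_prediction function in the forecasting row above solves the normal equations directly. A toy numpy sketch of that step, with made-up data chosen so the fit is exact:

# Hedged sketch: beta = (X^T X)^(-1) X^T y on a tiny design matrix.
import numpy as np

X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # bias column + one feature
y = np.array([1.0, 3.0, 5.0])                        # exactly y = 1 + 2x
beta = np.linalg.inv(X.T @ X) @ X.T @ y
assert np.allclose(beta, [1.0, 2.0])
# np.linalg.lstsq(X, y, rcond=None) is the numerically safer equivalent.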
import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def A__ ( ) -> None: """simple docstring""" print('''Making key files...''' ) make_key_files('''rsa''' , 10_24 ) print('''Key files generation successful.''' ) def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> tuple[tuple[int, int], tuple[int, int]]: """simple docstring""" print('''Generating prime p...''' ) _UpperCAmelCase = rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE_ ) print('''Generating prime q...''' ) _UpperCAmelCase = rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = p * q print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' ) while True: _UpperCAmelCase = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(SCREAMING_SNAKE_CASE_ , (p - 1) * (q - 1) ) == 1: break print('''Calculating d that is mod inverse of e...''' ) _UpperCAmelCase = cryptoMath.find_mod_inverse(SCREAMING_SNAKE_CASE_ , (p - 1) * (q - 1) ) _UpperCAmelCase = (n, e) _UpperCAmelCase = (n, d) return (public_key, private_key) def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> None: """simple docstring""" if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ): print('''\nWARNING:''' ) print( F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n''' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() _UpperCAmelCase , _UpperCAmelCase = generate_key(SCREAMING_SNAKE_CASE_ ) print(F'''\nWriting public key to file {name}_pubkey.txt...''' ) with open(F'''{name}_pubkey.txt''' , '''w''' ) as out_file: out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' ) print(F'''Writing private key to file {name}_privkey.txt...''' ) with open(F'''{name}_privkey.txt''' , '''w''' ) as out_file: out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' ) if __name__ == "__main__": main()
32
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"vocab_file": "spiece.model"} UpperCAmelCase_ = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 UpperCAmelCase_ = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } UpperCAmelCase_ = "▁" class __UpperCamelCase ( A__ ): __A : Any = VOCAB_FILES_NAMES __A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Tuple = ["""input_ids""", """attention_mask"""] def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: _UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) if legacy: logger.warning_once( f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' ) _UpperCAmelCase = legacy _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , ) _UpperCAmelCase = vocab_file _UpperCAmelCase = extra_ids _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , ) return max_model_length @property def UpperCamelCase( self ): return self.sp_model.get_piece_size() + self._extra_ids def UpperCamelCase( self ): _UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_UpperCamelCase )) + [1] return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def UpperCamelCase( self ): return list( set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase( self ): return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()] def UpperCamelCase( self , _UpperCamelCase ): if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) if token_ids_a is None: return token_ids_a else: _UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None return state def __setstate__( self , _UpperCamelCase ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: _UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' ) return super().tokenize(_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ): if not self.legacy: _UpperCAmelCase = text.startswith(_UpperCamelCase ) if is_first: _UpperCAmelCase = text[1:] _UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ): _UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def UpperCamelCase( self , _UpperCamelCase ): if token.startswith('''<extra_id_''' ): _UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase ) _UpperCAmelCase = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): if index < self.sp_model.get_piece_size(): _UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase ) else: _UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>''' return token def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = [] _UpperCAmelCase = '''''' _UpperCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token _UpperCAmelCase = True _UpperCAmelCase = [] else: current_sub_tokens.append(_UpperCamelCase ) _UpperCAmelCase = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: _UpperCAmelCase = 
self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
32
1
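A toy-scale walk-through of the RSA arithmetic used by the key generator above, with tiny fixed primes instead of the large-prime search; only the modular math is the point here.

# Hedged sketch: RSA keypair and round trip on textbook-sized numbers.
p, q = 61, 53
n = p * q                  # modulus, 3233
phi = (p - 1) * (q - 1)    # 3120
e = 17                     # public exponent, gcd(e, phi) == 1
d = pow(e, -1, phi)        # modular inverse (Python 3.8+), 2753
message = 65
cipher = pow(message, e, n)
assert pow(cipher, d, n) == message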
import flax.linen as nn import jax import jax.numpy as jnp class __UpperCamelCase ( nn.Module ): __A : int __A : jnp.dtype = jnp.floataa def UpperCamelCase( self ): _UpperCAmelCase = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _UpperCamelCase ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_states.shape _UpperCAmelCase = jax.image.resize( _UpperCamelCase , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , ) _UpperCAmelCase = self.conv(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): __A : int __A : jnp.dtype = jnp.floataa def UpperCamelCase( self ): _UpperCAmelCase = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _UpperCamelCase ): # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _UpperCAmelCase = self.conv(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): __A : int __A : int = None __A : float = 0.0 __A : bool = None __A : jnp.dtype = jnp.floataa def UpperCamelCase( self ): _UpperCAmelCase = self.in_channels if self.out_channels is None else self.out_channels _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCAmelCase = nn.Conv( _UpperCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _UpperCAmelCase = nn.Dense(_UpperCamelCase , dtype=self.dtype ) _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCAmelCase = nn.Dropout(self.dropout_prob ) _UpperCAmelCase = nn.Conv( _UpperCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _UpperCAmelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _UpperCAmelCase = None if use_nin_shortcut: _UpperCAmelCase = nn.Conv( _UpperCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , ) def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True ): _UpperCAmelCase = hidden_states _UpperCAmelCase = self.norma(_UpperCamelCase ) _UpperCAmelCase = nn.swish(_UpperCamelCase ) _UpperCAmelCase = self.conva(_UpperCamelCase ) _UpperCAmelCase = self.time_emb_proj(nn.swish(_UpperCamelCase ) ) _UpperCAmelCase = jnp.expand_dims(jnp.expand_dims(_UpperCamelCase , 1 ) , 1 ) _UpperCAmelCase = hidden_states + temb _UpperCAmelCase = self.norma(_UpperCamelCase ) _UpperCAmelCase = nn.swish(_UpperCamelCase ) _UpperCAmelCase = self.dropout(_UpperCamelCase , _UpperCamelCase ) _UpperCAmelCase = self.conva(_UpperCamelCase ) if self.conv_shortcut is not None: _UpperCAmelCase = self.conv_shortcut(_UpperCamelCase ) return hidden_states + residual
32
from __future__ import annotations


def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """simple docstring"""
    _UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
    return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )


def A__ ( ) -> int | None:
    """simple docstring"""
    for base_num in range(99_99 , 49_99 , -1 ):
        _UpperCAmelCase = 10_00_02 * base_num
        if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        _UpperCAmelCase = 1_00_20_03 * base_num
        if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
            return candidate
    return None


if __name__ == "__main__":
    print(f'''{solution() = }''')
32
1
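The pandigital search above relies on concatenated products. A small demonstration of the idea with the classic base 192 (the helper is illustrative): 192 * 1, 192 * 2, 192 * 3 concatenate to a 1-9 pandigital string.

# Hedged sketch: concatenated product of (1, 2, ..., count) with a base.
def concatenated_product(base, count):
    return "".join(str(base * i) for i in range(1, count + 1))

s = concatenated_product(192, 3)
assert s == "192384576"
assert len(s) == 9 and set(s) == set("123456789")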
def A__ ( SCREAMING_SNAKE_CASE_ : int = 10_00 ) -> int:
    """simple docstring"""
    _UpperCAmelCase = 3
    _UpperCAmelCase = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f'''{solution() = }''')
32
import numpy as np


def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
    """simple docstring"""
    return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
1
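The multiples-of-3-or-5 sum above can be cross-checked in closed form via inclusion-exclusion over arithmetic series; this sketch is a hypothetical companion, not part of the original solution.

# Hedged sketch: sum of multiples of k below n is k * m * (m + 1) / 2
# with m = (n - 1) // k; add the 3- and 5-series, subtract the 15-series.
def sum_multiples_below(n, k):
    m = (n - 1) // k
    return k * m * (m + 1) // 2

def solution_closed_form(n=1000):
    return (sum_multiples_below(n, 3)
            + sum_multiples_below(n, 5)
            - sum_multiples_below(n, 15))

assert solution_closed_form(10) == 23       # 3 + 5 + 6 + 9
assert solution_closed_form(1000) == 233168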
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = "PoolFormerConfig" # Base docstring UpperCAmelCase_ = "sail/poolformer_s12" UpperCAmelCase_ = [1, 5_12, 7, 7] # Image classification docstring UpperCAmelCase_ = "sail/poolformer_s12" UpperCAmelCase_ = "tabby, tabby cat" UpperCAmelCase_ = [ "sail/poolformer_s12", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : bool = False ) -> str: """simple docstring""" if drop_prob == 0.0 or not training: return input _UpperCAmelCase = 1 - drop_prob _UpperCAmelCase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets _UpperCAmelCase = keep_prob + torch.rand(SCREAMING_SNAKE_CASE_ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize _UpperCAmelCase = input.div(SCREAMING_SNAKE_CASE_ ) * random_tensor return output class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase = None ): super().__init__() _UpperCAmelCase = drop_prob def UpperCamelCase( self , _UpperCamelCase ): return drop_path(_UpperCamelCase , self.drop_prob , self.training ) def UpperCamelCase( self ): return "p={}".format(self.drop_prob ) class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): super().__init__() _UpperCAmelCase = patch_size if isinstance(_UpperCamelCase , collections.abc.Iterable ) else (patch_size, patch_size) _UpperCAmelCase = stride if isinstance(_UpperCamelCase , collections.abc.Iterable ) else (stride, stride) _UpperCAmelCase = padding if isinstance(_UpperCamelCase , collections.abc.Iterable ) else (padding, padding) _UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=_UpperCamelCase , stride=_UpperCamelCase , padding=_UpperCamelCase ) _UpperCAmelCase = norm_layer(_UpperCamelCase ) if norm_layer else nn.Identity() def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = self.projection(_UpperCamelCase ) _UpperCAmelCase = self.norm(_UpperCamelCase ) return embeddings class __UpperCamelCase ( nn.GroupNorm ): def __init__( self , _UpperCamelCase , **_UpperCamelCase ): super().__init__(1 , _UpperCamelCase , **_UpperCamelCase ) class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase ): super().__init__() _UpperCAmelCase = nn.AvgPoolad(_UpperCamelCase , stride=1 , padding=pool_size // 2 , count_include_pad=_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): return self.pool(_UpperCamelCase ) - hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): super().__init__() _UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) _UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) _UpperCAmelCase = 
PoolFormerDropPath(_UpperCamelCase ) if isinstance(config.hidden_act , _UpperCamelCase ): _UpperCAmelCase = ACTaFN[config.hidden_act] else: _UpperCAmelCase = config.hidden_act def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = self.conva(_UpperCamelCase ) _UpperCAmelCase = self.act_fn(_UpperCamelCase ) _UpperCAmelCase = self.drop(_UpperCamelCase ) _UpperCAmelCase = self.conva(_UpperCamelCase ) _UpperCAmelCase = self.drop(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): super().__init__() _UpperCAmelCase = PoolFormerPooling(_UpperCamelCase ) _UpperCAmelCase = PoolFormerOutput(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) _UpperCAmelCase = PoolFormerGroupNorm(_UpperCamelCase ) _UpperCAmelCase = PoolFormerGroupNorm(_UpperCamelCase ) # Useful for training neural nets _UpperCAmelCase = PoolFormerDropPath(_UpperCamelCase ) if drop_path > 0.0 else nn.Identity() _UpperCAmelCase = config.use_layer_scale if config.use_layer_scale: _UpperCAmelCase = nn.Parameter( config.layer_scale_init_value * torch.ones((_UpperCamelCase) ) , requires_grad=_UpperCamelCase ) _UpperCAmelCase = nn.Parameter( config.layer_scale_init_value * torch.ones((_UpperCamelCase) ) , requires_grad=_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): if self.use_layer_scale: _UpperCAmelCase = self.pooling(self.before_norm(_UpperCamelCase ) ) _UpperCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection _UpperCAmelCase = hidden_states + self.drop_path(_UpperCamelCase ) _UpperCAmelCase = () _UpperCAmelCase = self.output(self.after_norm(_UpperCamelCase ) ) _UpperCAmelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection _UpperCAmelCase = hidden_states + self.drop_path(_UpperCamelCase ) _UpperCAmelCase = (output,) + outputs return outputs else: _UpperCAmelCase = self.drop_path(self.pooling(self.before_norm(_UpperCamelCase ) ) ) # First residual connection _UpperCAmelCase = pooling_output + hidden_states _UpperCAmelCase = () # Second residual connection inside the PoolFormerOutput block _UpperCAmelCase = self.drop_path(self.output(self.after_norm(_UpperCamelCase ) ) ) _UpperCAmelCase = hidden_states + layer_output _UpperCAmelCase = (output,) + outputs return outputs class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase ): super().__init__() _UpperCAmelCase = config # stochastic depth decay rule _UpperCAmelCase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings _UpperCAmelCase = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) _UpperCAmelCase = nn.ModuleList(_UpperCamelCase ) # Transformer blocks _UpperCAmelCase = [] _UpperCAmelCase = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers _UpperCAmelCase = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( _UpperCamelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , 
drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(_UpperCamelCase ) ) _UpperCAmelCase = nn.ModuleList(_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=True ): _UpperCAmelCase = () if output_hidden_states else None _UpperCAmelCase = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): _UpperCAmelCase , _UpperCAmelCase = layers # Get patch embeddings from hidden_states _UpperCAmelCase = embedding_layer(_UpperCamelCase ) # Send the embeddings through the blocks for _, blk in enumerate(_UpperCamelCase ): _UpperCAmelCase = blk(_UpperCamelCase ) _UpperCAmelCase = layer_outputs[0] if output_hidden_states: _UpperCAmelCase = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase , hidden_states=_UpperCamelCase ) class __UpperCamelCase ( A__ ): __A : Tuple = PoolFormerConfig __A : int = """poolformer""" __A : Tuple = """pixel_values""" __A : Optional[Any] = True def UpperCamelCase( self , _UpperCamelCase ): if isinstance(_UpperCamelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(_UpperCamelCase , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False ): if isinstance(_UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = value UpperCAmelCase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCAmelCase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See\n [`PoolFormerImageProcessor.__call__`] for details.\n" @add_start_docstrings( """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , A__ , ) class __UpperCamelCase ( A__ ): def __init__( self , _UpperCamelCase ): super().__init__(_UpperCamelCase ) _UpperCAmelCase = config _UpperCAmelCase = PoolFormerEncoder(_UpperCamelCase ) # Initialize weights and apply final processing self.post_init() def UpperCamelCase( self ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(_UpperCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , ): _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) _UpperCAmelCase = self.encoder( _UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , ) _UpperCAmelCase = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=_UpperCamelCase , hidden_states=encoder_outputs.hidden_states , ) class __UpperCamelCase ( nn.Module ): def __init__( self , _UpperCamelCase ): super().__init__() _UpperCAmelCase = nn.Linear(config.hidden_size , config.hidden_size ) def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = self.dense(_UpperCamelCase ) return output @add_start_docstrings( """ PoolFormer Model transformer with an image classification head on top """ , A__ , ) class __UpperCamelCase ( A__ ): def __init__( self , _UpperCamelCase ): super().__init__(_UpperCamelCase ) _UpperCAmelCase = config.num_labels _UpperCAmelCase = PoolFormerModel(_UpperCamelCase ) # Final norm _UpperCAmelCase = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head _UpperCAmelCase = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , ): _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.poolformer( _UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , ) _UpperCAmelCase = outputs[0] _UpperCAmelCase = self.classifier(self.norm(_UpperCamelCase ).mean([-2, -1] ) ) _UpperCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCAmelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCAmelCase = '''single_label_classification''' else: _UpperCAmelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCAmelCase = MSELoss() if self.num_labels == 1: _UpperCAmelCase = loss_fct(logits.squeeze() , 
labels.squeeze() ) else: _UpperCAmelCase = loss_fct(_UpperCamelCase , _UpperCamelCase ) elif self.config.problem_type == "single_label_classification": _UpperCAmelCase = CrossEntropyLoss() _UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCAmelCase = BCEWithLogitsLoss() _UpperCAmelCase = loss_fct(_UpperCamelCase , _UpperCamelCase ) if not return_dict: _UpperCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCamelCase , logits=_UpperCamelCase , hidden_states=outputs.hidden_states )
32
UpperCAmelCase_ = { "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/" } # Exclamation mark is not in ITU-R recommendation # fmt: on UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()} def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return "".join(REVERSE_DICT[char] for char in message.split() ) def A__ ( ) -> None: """simple docstring""" _UpperCAmelCase = '''Morse code here!''' print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
32
1
from __future__ import annotations def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" _UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ ) return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' ) def A__ ( ) -> int | None: """simple docstring""" for base_num in range(99_99 , 49_99 , -1 ): _UpperCAmelCase = 10_00_02 * base_num if is_9_pandigital(SCREAMING_SNAKE_CASE_ ): return candidate for base_num in range(3_33 , 99 , -1 ): _UpperCAmelCase = 1_00_20_03 * base_num if is_9_pandigital(SCREAMING_SNAKE_CASE_ ): return candidate return None if __name__ == "__main__": print(f'''{solution() = }''')
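A quick worked check of the 1-9 pandigital test above (the helper name is ours): 192 concatenated with 192*2 and 192*3 yields 192384576, which uses each digit 1 through 9 exactly once.

def is_9_pandigital_sketch(number: int) -> bool:
    # Nine digits, covering 1..9 exactly once.
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")

concatenated = int(str(192) + str(192 * 2) + str(192 * 3))
assert concatenated == 192384576 and is_9_pandigital_sketch(concatenated)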
32
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( A__ , unittest.TestCase ): __A : Any = DanceDiffusionPipeline __A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __A : Tuple = PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } __A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __A : List[str] = False __A : str = False def UpperCamelCase( self ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) _UpperCAmelCase = IPNDMScheduler() _UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ): if str(_UpperCamelCase ).startswith('''mps''' ): _UpperCAmelCase = torch.manual_seed(_UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) _UpperCAmelCase = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def UpperCamelCase( self ): _UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase ) _UpperCAmelCase = pipe(**_UpperCamelCase ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) _UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCamelCase( self ): return super().test_save_load_local() @skip_mps def UpperCamelCase( self ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def UpperCamelCase( self ): return super().test_save_load_optional_components() @skip_mps def UpperCamelCase( self ): return super().test_attention_slicing_forward_pass() def UpperCamelCase( self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) 
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase( self ): _UpperCAmelCase = torch_device _UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 ) _UpperCAmelCase = output.audios _UpperCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
32
1
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class __UpperCamelCase ( A__ ): def UpperCamelCase( self , _UpperCamelCase ): with open(_UpperCamelCase , encoding='''utf-8''' ) as input_file: _UpperCAmelCase = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) _UpperCAmelCase = input_file.read() _UpperCAmelCase = regexp.search(_UpperCamelCase ) return match def UpperCamelCase( self , _UpperCamelCase ): with open(_UpperCamelCase , encoding='''utf-8''' ) as input_file: _UpperCAmelCase = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL ) _UpperCAmelCase = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` _UpperCAmelCase = regexp.finditer(_UpperCamelCase ) _UpperCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def UpperCamelCase( self ): _UpperCAmelCase = Path('''./datasets''' ) _UpperCAmelCase = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(_UpperCamelCase ) ): raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' ) def UpperCamelCase( self ): _UpperCAmelCase = Path('''./datasets''' ) _UpperCAmelCase = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(_UpperCamelCase ) ): raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
32
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ ("vision-encoder-decoder", 
"FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A 
: Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
32
1
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): __A : str = ["""audio_values""", """audio_mask"""] def __init__( self , _UpperCamelCase=2048 , _UpperCamelCase=1 , _UpperCamelCase=[16, 16] , _UpperCamelCase=128 , _UpperCamelCase=44100 , _UpperCamelCase=86 , _UpperCamelCase=2048 , _UpperCamelCase=0.0 , **_UpperCamelCase , ): super().__init__( feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , **_UpperCamelCase , ) _UpperCAmelCase = spectrogram_length _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = feature_size // self.patch_size[1] _UpperCAmelCase = n_fft _UpperCAmelCase = sampling_rate // hop_length_to_sampling_rate _UpperCAmelCase = sampling_rate _UpperCAmelCase = padding_value _UpperCAmelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_UpperCamelCase , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=_UpperCamelCase , norm='''slaney''' , mel_scale='''slaney''' , ).T def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = spectrogram( _UpperCamelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) _UpperCAmelCase = log_spec[:, :-1] _UpperCAmelCase = log_spec - 20.0 _UpperCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , **_UpperCamelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' f''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) _UpperCAmelCase = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _UpperCAmelCase = is_batched_numpy or ( isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ): _UpperCAmelCase = np.asarray(_UpperCamelCase , dtype=np.floataa ) elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _UpperCAmelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _UpperCAmelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _UpperCAmelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _UpperCamelCase ): _UpperCAmelCase = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _UpperCAmelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _UpperCAmelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _UpperCAmelCase = np.array(_UpperCamelCase ).astype(np.floataa ) # convert into correct format for padding _UpperCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _UpperCAmelCase = np.ones([len(_UpperCamelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _UpperCAmelCase = padded_audio_features * self.padding_value for i in range(len(_UpperCamelCase ) ): _UpperCAmelCase = audio_features[i] _UpperCAmelCase = feature # return as BatchFeature if return_attention_mask: _UpperCAmelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: _UpperCAmelCase = {'''audio_values''': padded_audio_features} _UpperCAmelCase = BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) return encoded_inputs
32
import baseaa def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes: """simple docstring""" return baseaa.baaencode(SCREAMING_SNAKE_CASE_.encode('''utf-8''' ) ) def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str: """simple docstring""" return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' ) if __name__ == "__main__": UpperCAmelCase_ = "Hello World!" UpperCAmelCase_ = baseaa_encode(test) print(encoded) UpperCAmelCase_ = baseaa_decode(encoded) print(decoded)
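For orientation, a round trip with the standard library; this assumes the renamed module above stands in for base64's Base85 codec, which is consistent with the sample's "Hello World!" self-test.

import base64

# Assumed equivalent of the renamed calls above: a Base85 round trip.
encoded = base64.b85encode("Hello World!".encode("utf-8"))
decoded = base64.b85decode(encoded).decode("utf-8")
assert decoded == "Hello World!"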
32
1
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __UpperCamelCase ( unittest.TestCase ): __A : List[Any] = inspect.getfile(accelerate.test_utils ) __A : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] ) __A : Optional[int] = ["""accelerate""", """launch"""] __A : Any = Path.home() / """.cache/huggingface/accelerate""" __A : Any = """default_config.yaml""" __A : Union[str, Any] = config_folder / config_file __A : List[Any] = config_folder / """_default_config.yaml""" __A : Union[str, Any] = Path("""tests/test_configs""" ) @classmethod def UpperCamelCase( cls ): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def UpperCamelCase( cls ): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def UpperCamelCase( self ): _UpperCAmelCase = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def UpperCamelCase( self ): for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=_UpperCamelCase ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(_UpperCamelCase ), self.test_file_path] , env=os.environ.copy() ) def UpperCamelCase( self ): execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class __UpperCamelCase ( unittest.TestCase ): __A : Dict = """test-tpu""" __A : Optional[Any] = """us-central1-a""" __A : int = """ls""" __A : Tuple = ["""accelerate""", """tpu-config"""] __A : Union[str, Any] = """cd /usr/share""" __A : Optional[Any] = """tests/test_samples/test_command_file.sh""" __A : Any = """Running gcloud compute tpus tpu-vm ssh""" def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_UpperCamelCase ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + [ '''--config_file''', 
'''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo "Hello World"''', '''--debug''', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , ) def UpperCamelCase( self ): _UpperCAmelCase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', '''--debug''', ] , return_stdout=_UpperCamelCase , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCamelCase , )
32
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): __A : int = ["""pixel_values"""] def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): super().__init__(**_UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' ) _UpperCAmelCase = do_resize _UpperCAmelCase = do_rescale _UpperCAmelCase = do_normalize _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if "shortest_edge" in size: _UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ): return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(_UpperCamelCase ) if not is_batched(_UpperCamelCase ): _UpperCAmelCase = [images] if not valid_images(_UpperCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
32
1
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" if not all(char in '''01''' for char in bin_string ): raise ValueError('''Non-binary value was passed to the function''' ) if not bin_string: raise ValueError('''Empty string was passed to the function''' ) _UpperCAmelCase = '''''' while len(SCREAMING_SNAKE_CASE_ ) % 3 != 0: _UpperCAmelCase = '''0''' + bin_string _UpperCAmelCase = [ bin_string[index : index + 3] for index in range(len(SCREAMING_SNAKE_CASE_ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: _UpperCAmelCase = 0 for index, val in enumerate(SCREAMING_SNAKE_CASE_ ): oct_val += int(2 ** (2 - index) * int(SCREAMING_SNAKE_CASE_ ) ) oct_string += str(SCREAMING_SNAKE_CASE_ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
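A sanity check of the 3-bit-grouping idea above against Python's built-ins (the variable names are ours).

bin_string = "1111"
expected = oct(int(bin_string, 2))[2:]  # "17"
# Pad to a multiple of 3 bits, then read each 3-bit group as one octal digit.
padded = bin_string.zfill((len(bin_string) + 2) // 3 * 3)
grouped = "".join(str(int(padded[i : i + 3], 2)) for i in range(0, len(padded), 3))
assert grouped == expected == "17"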
32
from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=A__ ): __A : str = ["""torch""", """scipy"""] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ): requires_backends(cls , ['''torch''', '''scipy'''] )
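A minimal sketch of the dummy-object pattern above (the helper and class names are ours): the placeholder exists so imports succeed, but instantiating it fails loudly when an optional backend is missing.

import importlib.util

def requires_backends_sketch(obj, backends):
    # Report which optional dependencies are not importable.
    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires {missing} to be installed")

class DummyTorchScipyObject:
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, ["torch", "scipy"])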
32
1
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCAmelCase_ = get_tests_dir("fixtures") UpperCAmelCase_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json") UpperCAmelCase_ = get_tests_dir("fixtures/dummy-config.json") class __UpperCamelCase ( unittest.TestCase ): def UpperCamelCase( self ): _UpperCAmelCase = 0 def UpperCamelCase( self ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self ): with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase ).to_dict() config_dict.pop('''feature_extractor_type''' ) _UpperCAmelCase = WavaVecaFeatureExtractor(**_UpperCamelCase ) # save in new folder model_config.save_pretrained(_UpperCamelCase ) config.save_pretrained(_UpperCamelCase ) _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase ) # make sure private variable is not incorrectly saved _UpperCAmelCase = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self ): with self.assertRaisesRegex( _UpperCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''bert-base''' ) def UpperCamelCase( self ): with self.assertRaisesRegex( _UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase , revision='''aaaaaa''' ) def UpperCamelCase( self ): with self.assertRaisesRegex( _UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' ) def UpperCamelCase( self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_UpperCamelCase ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_UpperCamelCase ): _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase ) _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_UpperCamelCase ) _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) def UpperCamelCase( self ): try: AutoConfig.register('''custom''' , _UpperCamelCase ) AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCamelCase ): AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API _UpperCAmelCase = CustomFeatureExtractor.from_pretrained(_UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_UpperCamelCase ) _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase( self ): class __UpperCamelCase ( A__ ): __A : Dict = True try: AutoConfig.register('''custom''' , _UpperCamelCase ) AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase ) # If remote code is not set, the default is to use local _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(not hasattr(_UpperCamelCase , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
32
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int: """simple docstring""" _UpperCAmelCase = [0 for i in range(n + 1 )] _UpperCAmelCase = 1 _UpperCAmelCase = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , i ): _UpperCAmelCase = 1 _UpperCAmelCase = 0 for i in range(SCREAMING_SNAKE_CASE_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f'''{solution() = }''')
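A miniature sieve with the same shape as the routine above, small enough to verify by hand: the primes below 10 sum to 2 + 3 + 5 + 7 = 17 (the variable names are ours).

n = 10
is_composite = [False] * (n + 1)
for i in range(2, int(n**0.5) + 1):
    if not is_composite[i]:
        for j in range(i * i, n + 1, i):  # step by the prime i
            is_composite[j] = True
assert sum(i for i in range(2, n) if not is_composite[i]) == 17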
32
1
UpperCAmelCase_ = { "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/" } # Exclamation mark is not in ITU-R recommendation # fmt: on UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()} def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str: """simple docstring""" return "".join(REVERSE_DICT[char] for char in message.split() ) def A__ ( ) -> None: """simple docstring""" _UpperCAmelCase = '''Morse code here!''' print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
32
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class __UpperCamelCase ( A__ ): def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , _UpperCamelCase , ) super().__init__(*_UpperCamelCase , **_UpperCamelCase )
32
1
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = F'''Input value of [number={number}] must be an integer''' raise TypeError(SCREAMING_SNAKE_CASE_ ) if number < 0: return False _UpperCAmelCase = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
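Worked examples for the digit-by-digit check above: a number passes exactly when its square ends in the number itself (the helper name is ours).

def square_ends_in_itself(number: int) -> bool:
    return str(number * number).endswith(str(number))

assert square_ends_in_itself(25)      # 25 * 25 = 625
assert square_ends_in_itself(76)      # 76 * 76 = 5776
assert not square_ends_in_itself(7)   # 7 * 7 = 49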
32
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( A__ ): __A : Dict = ["""image_processor""", """tokenizer"""] __A : List[str] = """BridgeTowerImageProcessor""" __A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , _UpperCamelCase , _UpperCamelCase ): super().__init__(_UpperCamelCase , _UpperCamelCase ) def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ): _UpperCAmelCase = self.tokenizer( text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , ) # add pixel_values + pixel_mask _UpperCAmelCase = self.image_processor( _UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase ) encoding.update(_UpperCamelCase ) return encoding def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ): return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase ) @property def UpperCamelCase( self ): _UpperCAmelCase = self.tokenizer.model_input_names _UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
32
1
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = downstream_dict['''projector.weight'''] _UpperCAmelCase = downstream_dict['''projector.bias'''] _UpperCAmelCase = downstream_dict['''model.post_net.linear.weight'''] _UpperCAmelCase = downstream_dict['''model.post_net.linear.bias'''] return model def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = downstream_dict['''model.linear.weight'''] _UpperCAmelCase = downstream_dict['''model.linear.bias'''] return model def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int: """simple docstring""" _UpperCAmelCase = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = downstream_dict['''connector.weight'''] _UpperCAmelCase = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _UpperCAmelCase = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] _UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] _UpperCAmelCase = downstream_dict['''objective.W'''] return model @torch.no_grad() def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' ) _UpperCAmelCase = checkpoint['''Downstream'''] _UpperCAmelCase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): _UpperCAmelCase = convert_classification(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif arch.endswith('''ForAudioFrameClassification''' ): _UpperCAmelCase = convert_diarization(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif arch.endswith('''ForXVector''' ): _UpperCAmelCase = convert_xvector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: _UpperCAmelCase = 
checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") UpperCAmelCase_ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
32
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
1
from __future__ import annotations def A__ ( SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> tuple[float, list[float]]: """simple docstring""" _UpperCAmelCase = list(range(len(SCREAMING_SNAKE_CASE_ ) ) ) _UpperCAmelCase = [v / w for v, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] index.sort(key=lambda SCREAMING_SNAKE_CASE_ : ratio[i] , reverse=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = 0 _UpperCAmelCase = [0] * len(SCREAMING_SNAKE_CASE_ ) for i in index: if weight[i] <= capacity: _UpperCAmelCase = 1 max_value += value[i] capacity -= weight[i] else: _UpperCAmelCase = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
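A classic instance for the fractional-knapsack routine above (the numbers are our illustration): sort by value/weight ratio, take whole items while they fit, then a fraction of the next.

values, weights, capacity = [60, 100, 120], [10, 20, 30], 50
# Ratios are 6, 5, 4 -> take items 0 and 1 whole, then 20/30 of item 2.
best = 60 + 100 + 120 * (capacity - 10 - 20) / 30
assert best == 240.0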
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __UpperCamelCase ( A__ ): __A : Any = """biogpt""" def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = scale_embedding _UpperCAmelCase = use_cache _UpperCAmelCase = layerdrop _UpperCAmelCase = activation_dropout super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
32
1
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str: """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] ) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes: """simple docstring""" if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE_ ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
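A round trip checking the hand-rolled Base16 pair above against the standard library (the variable names are ours).

import base64

data = b"Hello"
encoded = "".join(f"{byte:02X}" for byte in data)
assert encoded == "48656C6C6F" == base64.b16encode(data).decode()
assert bytes(int(encoded[i : i + 2], 16) for i in range(0, len(encoded), 2)) == data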
32
from typing import List from .keymap import KEYMAP, get_character def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += [key] setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict: """simple docstring""" def decorator(SCREAMING_SNAKE_CASE_ : Any ): _UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] ) handle += keys setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ ) return func return decorator class __UpperCamelCase ( A__ ): def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if not hasattr(_UpperCamelCase , '''key_handler''' ): setattr(_UpperCamelCase , '''key_handler''' , {} ) setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): _UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] ) for key in handled_keys: _UpperCAmelCase = value return new_cls @staticmethod def UpperCamelCase( cls ): _UpperCAmelCase = get_character() if char != KEYMAP["undefined"]: _UpperCAmelCase = ord(_UpperCamelCase ) _UpperCAmelCase = cls.key_handler.get(_UpperCamelCase ) if handler: _UpperCAmelCase = char return handler(cls ) else: return None def A__ ( cls : Union[str, Any] ) -> Any: """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
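A minimal sketch of the key-registration decorator above (the decorator and function names are ours): each application appends to a handle_key attribute that a dispatcher can inspect later.

def on_key(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator

@on_key("j")
@on_key("k")
def move(direction):
    return direction

assert sorted(move.handle_key) == ["j", "k"]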
32
1