Dataset schema:

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| `code` | string | 82 characters | 53.2k characters |
| `code_codestyle` | int64 | 0 | 721 |
| `style_context` | string | 91 characters | 41.9k characters |
| `style_context_codestyle` | int64 | 0 | 699 |
| `label` | int64 | 0 | 1 |
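Below is a minimal sketch of loading the dataset and inspecting one row with the `datasets` library. The repository ID `org/code-style-pairs` is a placeholder, not the dataset's actual name; the column names follow the schema above.

```python
from datasets import load_dataset

# NOTE: "org/code-style-pairs" is a placeholder repository ID; substitute the
# real dataset name when loading.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample
```

Sample rows follow, with the `code` and `style_context` values shown as formatted Python.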
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class A( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self ) -> Any: """simple docstring""" _UpperCamelCase :Tuple = [[1, 2, 4], [1, 2, 3, 4]] _UpperCamelCase :Tuple = DisjunctiveConstraint(__lowerCamelCase ) self.assertTrue(isinstance(dc.token_ids , __lowerCamelCase ) ) with self.assertRaises(__lowerCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowerCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCamelCase( self ) -> List[str]: """simple docstring""" _UpperCamelCase :Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowerCamelCase ): DisjunctiveConstraint(__lowerCamelCase ) # fails here def _UpperCamelCase( self ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase :Union[str, Any] = [[1, 2, 3], [1, 2, 4]] _UpperCamelCase :Union[str, Any] = DisjunctiveConstraint(__lowerCamelCase ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :Tuple = dc.update(1 ) _UpperCamelCase :int = stepped is True and completed is False and reset is False self.assertTrue(__lowerCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :List[str] = dc.update(2 ) _UpperCamelCase :str = stepped is True and completed is False and reset is False self.assertTrue(__lowerCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :str = dc.update(3 ) _UpperCamelCase :Tuple = stepped is True and completed is True and reset is False self.assertTrue(__lowerCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCamelCase( self ) -> List[str]: """simple docstring""" _UpperCamelCase :List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] _UpperCamelCase :str = DisjunctiveConstraint(__lowerCamelCase ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :List[str] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :Any = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :Optional[Any] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :Tuple = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase :Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
`code_codestyle`: 355
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset snake_case = random.Random() def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=1.0 , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> List[str]: if rng is None: _snake_case = global_rng _snake_case = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase ( unittest.TestCase ): def __init__( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : List[Any]=4_0_0 , __lowerCamelCase : Any=2_0_0_0 , __lowerCamelCase : Any=2_0_4_8 , __lowerCamelCase : Any=1_2_8 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[int]=5_1_2 , __lowerCamelCase : Tuple=3_0 , __lowerCamelCase : List[Any]=4_4_1_0_0 , ): """simple docstring""" _snake_case = parent _snake_case = batch_size _snake_case = min_seq_length _snake_case = max_seq_length _snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _snake_case = spectrogram_length _snake_case = feature_size _snake_case = num_audio_channels _snake_case = hop_length _snake_case = chunk_length _snake_case = sampling_rate def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any=False , __lowerCamelCase : int=False ): """simple docstring""" def _flatten(__lowerCamelCase : List[str] ): return list(itertools.chain(*__lowerCamelCase ) ) if equal_length: _snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _snake_case = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _snake_case = [np.asarray(__lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : Tuple = TvltFeatureExtractor def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = TvltFeatureExtractionTester(self ) def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(__lowerCamelCase , '''spectrogram_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''feature_size''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''num_audio_channels''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''hop_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''chunk_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''sampling_rate''' ) ) def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with 
tempfile.TemporaryDirectory() as tmpdirname: _snake_case = feat_extract_first.save_pretrained(__lowerCamelCase )[0] check_json_file_has_correct_format(__lowerCamelCase ) _snake_case = self.feature_extraction_class.from_pretrained(__lowerCamelCase ) _snake_case = feat_extract_first.to_dict() _snake_case = feat_extract_second.to_dict() _snake_case = dict_first.pop('''mel_filters''' ) _snake_case = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = os.path.join(__lowerCamelCase , '''feat_extract.json''' ) feat_extract_first.to_json_file(__lowerCamelCase ) _snake_case = self.feature_extraction_class.from_json_file(__lowerCamelCase ) _snake_case = feat_extract_first.to_dict() _snake_case = feat_extract_second.to_dict() _snake_case = dict_first.pop('''mel_filters''' ) _snake_case = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" # Initialize feature_extractor _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] _snake_case = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input _snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _snake_case = feature_extractor(__lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _snake_case = feature_extractor( __lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=__lowerCamelCase ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
_snake_case = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] _snake_case = np.asarray(__lowerCamelCase ) _snake_case = feature_extractor(__lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" _snake_case = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _snake_case = ds.sort('''id''' ).select(range(__lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = self._load_datasamples(1 ) _snake_case = TvltFeatureExtractor() _snake_case = feature_extractor(__lowerCamelCase , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) ) _snake_case = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __lowerCamelCase , atol=1E-4 ) )
`style_context_codestyle`: 103
`label`: 0
"""simple docstring""" import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A ( A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Any = CLIPTokenizer lowerCAmelCase : List[str] = CLIPTokenizerFast lowerCAmelCase : str = True lowerCAmelCase : Tuple = {} lowerCAmelCase : List[Any] = False def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().setUp() # fmt: off lowercase__ : Optional[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on lowercase__ : int = dict(zip(_snake_case ,range(len(_snake_case ) ) ) ) lowercase__ : Any = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>'''] lowercase__ : Optional[Any] = {'''unk_token''': '''<unk>'''} lowercase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write(json.dumps(_snake_case ) + '''\n''' ) with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_snake_case ) ) def UpperCAmelCase ( self : Union[str, Any] ,**_snake_case : List[Any] ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case ) def UpperCAmelCase ( self : Optional[int] ,**_snake_case : Dict ) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case ) def UpperCAmelCase ( self : Optional[int] ,_snake_case : int ) -> Optional[int]: """simple docstring""" lowercase__ : str = '''lower newer''' lowercase__ : Dict = '''lower newer''' return input_text, output_text def UpperCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" lowercase__ : int = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) lowercase__ : List[Any] = '''lower newer''' lowercase__ : Tuple = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>'''] lowercase__ : Tuple = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) lowercase__ : List[Any] = tokens + [tokenizer.unk_token] lowercase__ : str = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) ,_snake_case ) @require_ftfy def UpperCAmelCase ( self : Any ) -> Any: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__ : Any = self.tokenizer_class.from_pretrained(_snake_case ,**_snake_case ) lowercase__ : Dict = self.rust_tokenizer_class.from_pretrained(_snake_case ,**_snake_case ) lowercase__ : Dict = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.''' lowercase__ : int = tokenizer_s.tokenize(_snake_case ) lowercase__ : Any = tokenizer_r.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) # Test that the tokenization 
is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways lowercase__ : Any = '''xa\u0303y''' + ''' ''' + '''x\xe3y''' lowercase__ : List[str] = tokenizer_s.tokenize(_snake_case ) lowercase__ : Union[str, Any] = tokenizer_r.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) # Test that the tokenization is identical on unicode of space type lowercase__ : str = [ '''\u0009''', # (horizontal tab, '\t') '''\u000B''', # (vertical tab) '''\u000C''', # (form feed) '''\u0020''', # (space, ' ') '''\u200E''', # (left-to-right mark):w '''\u200F''', # (right-to-left mark) ] for unicode_seq in spaces_unicodes: lowercase__ : str = tokenizer_s.tokenize(_snake_case ) lowercase__ : List[str] = tokenizer_r.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) # Test that the tokenization is identical on unicode of line break type lowercase__ : Optional[Any] = [ '''\u000A''', # (line feed, '\n') '''\r\n''', # (carriage return and line feed, '\r\n') '''\u000D''', # (carriage return, '\r') '''\r''', # (carriage return, '\r') '''\u000D''', # (carriage return, '\r') '''\u2028''', # (line separator) '''\u2029''', # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: lowercase__ : Dict = tokenizer_s.tokenize(_snake_case ) lowercase__ : Optional[int] = tokenizer_r.tokenize(_snake_case ) self.assertListEqual(_snake_case ,_snake_case ) def UpperCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__ : Any = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` lowercase__ : Union[str, Any] = f"""{text_of_1_token} {text_of_1_token}""" lowercase__ : Dict = self.rust_tokenizer_class.from_pretrained( _snake_case ,use_fast=_snake_case ,) lowercase__ : List[Any] = tokenizer_r(_snake_case ,return_offsets_mapping=_snake_case ,add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) ,) lowercase__ : List[str] = f""" {text}""" lowercase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( _snake_case ,use_fast=_snake_case ,) lowercase__ : Tuple = tokenizer_r(_snake_case ,return_offsets_mapping=_snake_case ,add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) ,) def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" with self.assertRaises(_snake_case ) as context: self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' ) self.assertTrue( context.exception.args[0].startswith( '''The `backend_tokenizer` provided does not match the expected format.''' ) ) @require_ftfy def UpperCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" super().test_tokenization_python_rust_equals() def UpperCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" pass
`code_codestyle`: 122
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __A : '''simple docstring''' def __init__( self : Tuple ,_snake_case : Tuple ,_snake_case : Dict=13 ,_snake_case : Optional[int]=7 ,_snake_case : List[str]=True ,_snake_case : Optional[Any]=True ,_snake_case : str=False ,_snake_case : Optional[int]=True ,_snake_case : int=99 ,_snake_case : int=32 ,_snake_case : str=5 ,_snake_case : Any=4 ,_snake_case : str=37 ,_snake_case : str="gelu" ,_snake_case : Optional[Any]=0.1 ,_snake_case : Union[str, Any]=0.1 ,_snake_case : str=512 ,_snake_case : Dict=16 ,_snake_case : Dict=2 ,_snake_case : Tuple=0.02 ,_snake_case : int=3 ,_snake_case : Optional[int]=4 ,_snake_case : int=None ,) -> Tuple: """simple docstring""" lowercase__ : Optional[Any] = parent lowercase__ : List[str] = batch_size lowercase__ : str = seq_length lowercase__ : Tuple = is_training lowercase__ : List[str] = use_input_mask lowercase__ : Optional[Any] = use_token_type_ids lowercase__ : str = use_labels lowercase__ : Any = vocab_size lowercase__ : str = hidden_size lowercase__ : int = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Optional[int] = intermediate_size lowercase__ : Dict = hidden_act lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : Optional[int] = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : Union[str, Any] = type_vocab_size lowercase__ : Any = type_sequence_label_size lowercase__ : List[str] = initializer_range lowercase__ : Tuple = num_labels lowercase__ : int = num_choices lowercase__ : Optional[int] = scope def UpperCAmelCase ( self : str ) -> Tuple: """simple docstring""" lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase__ : Optional[int] = None if self.use_input_mask: lowercase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowercase__ : Optional[Any] = None lowercase__ : Any = None lowercase__ : List[Any] = None if self.use_labels: lowercase__ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase__ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices ) lowercase__ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self : int ) -> List[str]: """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob 
,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,use_stable_embedding=_snake_case ,) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ,_snake_case : Tuple ,_snake_case : int ,_snake_case : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Tuple ,_snake_case : Tuple ) -> Optional[int]: """simple docstring""" lowercase__ : Optional[int] = OpenLlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ) lowercase__ : Dict = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : Tuple ,_snake_case : int ,_snake_case : Tuple ,_snake_case : Union[str, Any] ,_snake_case : str ,_snake_case : List[str] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,) -> Optional[Any]: """simple docstring""" lowercase__ : List[str] = True lowercase__ : Tuple = OpenLlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Dict = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,) lowercase__ : str = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,) lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,_snake_case : List[str] ,_snake_case : List[Any] ,_snake_case : Any ,_snake_case : List[Any] ,_snake_case : List[str] ,_snake_case : List[Any] ,_snake_case : Union[str, Any] ,) -> Dict: """simple docstring""" lowercase__ : int = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : Union[str, Any] ,_snake_case : Tuple ,_snake_case : str ,_snake_case : int ,_snake_case : List[Any] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,) -> int: """simple docstring""" lowercase__ : List[Any] = True lowercase__ : Tuple = True lowercase__ : Optional[int] = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowercase__ : List[str] = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,) lowercase__ : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase__ : Tuple = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase__ : str = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase__ : Tuple = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase__ : Tuple = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0] lowercase__ : Union[str, 
Any] = model( _snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0] # select random slice lowercase__ : Optional[int] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase__ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) ) def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowercase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : str = config_and_inputs lowercase__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __A ( A_ ,A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Optional[Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) lowerCAmelCase : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else () lowerCAmelCase : int = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : Any = False def UpperCAmelCase ( self : Dict ) -> int: """simple docstring""" lowercase__ : Tuple = OpenLlamaModelTester(self ) lowercase__ : Tuple = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 ) def UpperCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Any ) -> Dict: """simple docstring""" lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ : int = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[Any] = 3 lowercase__ : List[str] = input_dict['''input_ids'''] lowercase__ : Optional[Any] = input_ids.ne(1 ).to(_snake_case ) lowercase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowercase__ : Dict = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[int] = 3 lowercase__ : str = '''single_label_classification''' lowercase__ : Optional[Any] = 
input_dict['''input_ids'''] lowercase__ : Any = input_ids.ne(1 ).to(_snake_case ) lowercase__ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) lowercase__ : Any = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : Union[str, Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[int] = 3 lowercase__ : Tuple = '''multi_label_classification''' lowercase__ : str = input_dict['''input_ids'''] lowercase__ : Union[str, Any] = input_ids.ne(1 ).to(_snake_case ) lowercase__ : List[Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) lowercase__ : Optional[Any] = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def UpperCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[int] = ids_tensor([1, 10] ,config.vocab_size ) lowercase__ : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : Optional[int] = OpenLlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowercase__ : List[Any] = original_model(_snake_case ).last_hidden_state lowercase__ : Optional[Any] = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : Optional[int] = {'''type''': scaling_type, '''factor''': 10.0} lowercase__ : Optional[int] = OpenLlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowercase__ : Optional[int] = scaled_model(_snake_case ).last_hidden_state lowercase__ : Optional[Any] = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
`style_context_codestyle`: 122
`label`: 1
Row 3

`code`:

```python
'''simple docstring'''


def __snake_case(lowerCamelCase_: List[Any], lowerCamelCase_: List[Any]):
    '''simple docstring'''
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
`code_codestyle`: 664
`style_context`:

```python
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

lowerCamelCase__ = logging.get_logger(__name__)

lowerCamelCase__ = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class lowerCAmelCase__(__lowercase):
    UpperCamelCase_: Union[str, Any] = "pix2struct_text_model"
    UpperCamelCase_: str = ["past_key_values"]
    UpperCamelCase_: str = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        a=5_02_44,
        a=7_68,
        a=64,
        a=20_48,
        a=12,
        a=12,
        a=32,
        a=1_28,
        a=0.1,
        a=1e-6,
        a=1.0,
        a="gelu_new",
        a=0,
        a=False,
        a=0,
        a=1,
        a=False,
        a=True,
        **a,
    ) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = d_kv
        _UpperCamelCase = d_ff
        _UpperCamelCase = num_layers
        _UpperCamelCase = num_heads
        _UpperCamelCase = relative_attention_num_buckets
        _UpperCamelCase = relative_attention_max_distance
        _UpperCamelCase = dropout_rate
        _UpperCamelCase = layer_norm_epsilon
        _UpperCamelCase = initializer_factor
        _UpperCamelCase = use_cache
        _UpperCamelCase = eos_token_id
        _UpperCamelCase = decoder_start_token_id

        # for backwards compatibility
        _UpperCamelCase = dense_act_fn

        super().__init__(
            pad_token_id=a,
            eos_token_id=a,
            decoder_start_token_id=a,
            tie_word_embeddings=a,
            is_decoder=a,
            **a,
        )

    @classmethod
    def A_(cls, a, **a) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(a)

        _UpperCamelCase, _UpperCamelCase = cls.get_config_dict(a, **a)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""") == "pix2struct":
            _UpperCamelCase = config_dict["""text_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(a, **a)


class lowerCAmelCase__(__lowercase):
    UpperCamelCase_: int = "pix2struct_vision_model"

    def __init__(
        self,
        a=7_68,
        a=7_68,
        a=20_48,
        a=64,
        a=12,
        a=12,
        a="gelu_new",
        a=1e-6,
        a=0.0,
        a=0.0,
        a=1e-10,
        a=1.0,
        a=40_96,
        a=32,
        a=1_28,
        **a,
    ) -> Tuple:
        '''simple docstring'''
        super().__init__(**a)

        _UpperCamelCase = hidden_size
        _UpperCamelCase = patch_embed_hidden_size
        _UpperCamelCase = d_ff
        _UpperCamelCase = dropout_rate
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = initializer_range
        _UpperCamelCase = initializer_factor
        _UpperCamelCase = attention_dropout
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = dense_act_fn
        _UpperCamelCase = seq_len
        _UpperCamelCase = relative_attention_num_buckets
        _UpperCamelCase = relative_attention_max_distance
        _UpperCamelCase = d_kv

    @classmethod
    def A_(cls, a, **a) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(a)

        _UpperCamelCase, _UpperCamelCase = cls.get_config_dict(a, **a)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""") == "pix2struct":
            _UpperCamelCase = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(a, **a)


class lowerCAmelCase__(__lowercase):
    UpperCamelCase_: Dict = "pix2struct"
    UpperCamelCase_: int = True

    def __init__(self, a=None, a=None, a=1.0, a=0.02, a=False, a=False, a=True, **a) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(tie_word_embeddings=a, is_encoder_decoder=a, **a)

        if text_config is None:
            _UpperCamelCase = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""")

        if vision_config is None:
            _UpperCamelCase = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""")

        _UpperCamelCase = PixaStructTextConfig(**a)
        _UpperCamelCase = PixaStructVisionConfig(**a)
        _UpperCamelCase = self.text_config.decoder_start_token_id
        _UpperCamelCase = self.text_config.pad_token_id
        _UpperCamelCase = self.text_config.eos_token_id
        _UpperCamelCase = initializer_factor
        _UpperCamelCase = initializer_range
        _UpperCamelCase = self.initializer_range
        _UpperCamelCase = self.initializer_range
        _UpperCamelCase = is_vqa

    @classmethod
    def A_(cls, a, a, **a) -> str:
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a)

    def A_(self) -> Union[str, Any]:
        '''simple docstring'''
        _UpperCamelCase = copy.deepcopy(self.__dict__)
        _UpperCamelCase = self.text_config.to_dict()
        _UpperCamelCase = self.vision_config.to_dict()
        _UpperCamelCase = self.__class__.model_type
        return output
```
`style_context_codestyle`: 612
`label`: 0
Row 4

`code`:

```python
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

snake_case_: str = logging.get_logger(__name__)

snake_case_: str = {
    '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}


class A__(UpperCamelCase__):
    UpperCAmelCase = "data2vec-text"

    def __init__(
        self: Optional[int],
        _a: Union[str, Any] = 3_0522,
        _a: Any = 768,
        _a: List[Any] = 12,
        _a: Any = 12,
        _a: List[str] = 3072,
        _a: Optional[Any] = "gelu",
        _a: List[str] = 0.1,
        _a: Optional[Any] = 0.1,
        _a: Optional[Any] = 512,
        _a: Any = 2,
        _a: Optional[Any] = 0.02,
        _a: Dict = 1E-12,
        _a: int = 1,
        _a: Any = 0,
        _a: List[Any] = 2,
        _a: Dict = "absolute",
        _a: Optional[Any] = True,
        _a: Dict = None,
        **_a: Optional[Any],
    ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(pad_token_id=_a, bos_token_id=_a, eos_token_id=_a, **_a)

        _SCREAMING_SNAKE_CASE = vocab_size
        _SCREAMING_SNAKE_CASE = hidden_size
        _SCREAMING_SNAKE_CASE = num_hidden_layers
        _SCREAMING_SNAKE_CASE = num_attention_heads
        _SCREAMING_SNAKE_CASE = hidden_act
        _SCREAMING_SNAKE_CASE = intermediate_size
        _SCREAMING_SNAKE_CASE = hidden_dropout_prob
        _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE = max_position_embeddings
        _SCREAMING_SNAKE_CASE = type_vocab_size
        _SCREAMING_SNAKE_CASE = initializer_range
        _SCREAMING_SNAKE_CASE = layer_norm_eps
        _SCREAMING_SNAKE_CASE = position_embedding_type
        _SCREAMING_SNAKE_CASE = use_cache
        _SCREAMING_SNAKE_CASE = classifier_dropout


class A__(UpperCamelCase__):
    @property
    def __UpperCamelCase(self: List[str]) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            _SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            _SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
```
`code_codestyle`: 191
`style_context`:

```python
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput

snake_case_: Optional[Any] = '''scheduler_config.json'''


class A__(UpperCamelCase__):
    UpperCAmelCase = 1
    UpperCAmelCase = 2
    UpperCAmelCase = 3
    UpperCAmelCase = 4
    UpperCAmelCase = 5
    UpperCAmelCase = 6
    UpperCAmelCase = 7
    UpperCAmelCase = 8
    UpperCAmelCase = 9
    UpperCAmelCase = 10
    UpperCAmelCase = 11
    UpperCAmelCase = 12
    UpperCAmelCase = 13
    UpperCAmelCase = 14


@dataclass
class A__(UpperCamelCase__):
    UpperCAmelCase = 42


class A__:
    UpperCAmelCase = SCHEDULER_CONFIG_NAME
    UpperCAmelCase = []
    UpperCAmelCase = True

    @classmethod
    def __UpperCamelCase(
        cls: List[str],
        _a: Dict[str, Any] = None,
        _a: Optional[str] = None,
        _a: Optional[Any] = False,
        **_a: Dict,
    ) -> Dict:
        """simple docstring"""
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = cls.load_config(
            pretrained_model_name_or_path=_a,
            subfolder=_a,
            return_unused_kwargs=_a,
            return_commit_hash=_a,
            **_a,
        )
        return cls.from_config(_a, return_unused_kwargs=_a, **_a)

    def __UpperCamelCase(self: Dict, _a: Union[str, os.PathLike], _a: bool = False, **_a: int) -> Dict:
        """simple docstring"""
        self.save_config(save_directory=_a, push_to_hub=_a, **_a)

    @property
    def __UpperCamelCase(self: List[str]) -> List[Any]:
        """simple docstring"""
        return self._get_compatibles()

    @classmethod
    def __UpperCamelCase(cls: List[Any]) -> str:
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = list(set([cls.__name__] + cls._compatibles))
        _SCREAMING_SNAKE_CASE = importlib.import_module(__name__.split('''.''')[0])
        _SCREAMING_SNAKE_CASE = [
            getattr(_a, _a) for c in compatible_classes_str if hasattr(_a, _a)
        ]
        return compatible_classes
```
`style_context_codestyle`: 191
`label`: 1
Row 5

`code`:

```python
'''simple docstring'''
from ...processing_utils import ProcessorMixin


class lowercase__(snake_case_):
    '''simple docstring'''

    _snake_case = '''SpeechT5FeatureExtractor'''
    _snake_case = '''SpeechT5Tokenizer'''

    def __init__(self, lowerCamelCase__, lowerCamelCase__):
        '''simple docstring'''
        super().__init__(lowerCamelCase__, lowerCamelCase__)

    def __call__(self, *lowerCamelCase__, **lowerCamelCase__):
        '''simple docstring'''
        UpperCamelCase = kwargs.pop('''audio''', lowerCamelCase__)
        UpperCamelCase = kwargs.pop('''text''', lowerCamelCase__)
        UpperCamelCase = kwargs.pop('''text_target''', lowerCamelCase__)
        UpperCamelCase = kwargs.pop('''audio_target''', lowerCamelCase__)
        UpperCamelCase = kwargs.pop('''sampling_rate''', lowerCamelCase__)

        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?'''
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?'''
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.'''
            )

        if audio is not None:
            UpperCamelCase = self.feature_extractor(lowerCamelCase__, *lowerCamelCase__, sampling_rate=lowerCamelCase__, **lowerCamelCase__)
        elif text is not None:
            UpperCamelCase = self.tokenizer(lowerCamelCase__, **lowerCamelCase__)
        else:
            UpperCamelCase = None

        if audio_target is not None:
            UpperCamelCase = self.feature_extractor(audio_target=lowerCamelCase__, *lowerCamelCase__, sampling_rate=lowerCamelCase__, **lowerCamelCase__)
            UpperCamelCase = targets['''input_values''']
        elif text_target is not None:
            UpperCamelCase = self.tokenizer(lowerCamelCase__, **lowerCamelCase__)
            UpperCamelCase = targets['''input_ids''']
        else:
            UpperCamelCase = None

        if inputs is None:
            return targets

        if targets is not None:
            UpperCamelCase = labels
            UpperCamelCase = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                UpperCamelCase = decoder_attention_mask

        return inputs

    def UpperCAmelCase(self, *lowerCamelCase__, **lowerCamelCase__):
        '''simple docstring'''
        UpperCamelCase = kwargs.pop('''input_values''', lowerCamelCase__)
        UpperCamelCase = kwargs.pop('''input_ids''', lowerCamelCase__)
        UpperCamelCase = kwargs.pop('''labels''', lowerCamelCase__)

        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.'''
            )

        if input_values is not None:
            UpperCamelCase = self.feature_extractor.pad(lowerCamelCase__, *lowerCamelCase__, **lowerCamelCase__)
        elif input_ids is not None:
            UpperCamelCase = self.tokenizer.pad(lowerCamelCase__, **lowerCamelCase__)
        else:
            UpperCamelCase = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(lowerCamelCase__, lowerCamelCase__) and "input_ids" in labels[0]):
                UpperCamelCase = self.tokenizer.pad(lowerCamelCase__, **lowerCamelCase__)
                UpperCamelCase = targets['''input_ids''']
            else:
                UpperCamelCase = self.feature_extractor.feature_size
                UpperCamelCase = self.feature_extractor.num_mel_bins
                UpperCamelCase = self.feature_extractor.pad(lowerCamelCase__, *lowerCamelCase__, **lowerCamelCase__)
                UpperCamelCase = feature_size_hack
                UpperCamelCase = targets['''input_values''']
        else:
            UpperCamelCase = None

        if inputs is None:
            return targets

        if targets is not None:
            UpperCamelCase = labels
            UpperCamelCase = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                UpperCamelCase = decoder_attention_mask

        return inputs

    def UpperCAmelCase(self, *lowerCamelCase__, **lowerCamelCase__):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*lowerCamelCase__, **lowerCamelCase__)

    def UpperCAmelCase(self, *lowerCamelCase__, **lowerCamelCase__):
        '''simple docstring'''
        return self.tokenizer.decode(*lowerCamelCase__, **lowerCamelCase__)
```
`code_codestyle`: 212
`style_context`:

```python
'''simple docstring'''
from math import factorial


def __snake_case(_UpperCAmelCase: int = 100):
    return sum(map(_UpperCAmelCase, str(factorial(_UpperCAmelCase))))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
```
`style_context_codestyle`: 212
`label`: 1
Row 6

`code`:

```python
from ... import PretrainedConfig

snake_case = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class __A(snake_case__):
    '''simple docstring'''

    a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    a_ = '''nezha'''

    def __init__(
        self,
        _snake_case=2_1128,
        _snake_case=768,
        _snake_case=12,
        _snake_case=12,
        _snake_case=3072,
        _snake_case="gelu",
        _snake_case=0.1,
        _snake_case=0.1,
        _snake_case=512,
        _snake_case=64,
        _snake_case=2,
        _snake_case=0.02,
        _snake_case=1E-1_2,
        _snake_case=0.1,
        _snake_case=0,
        _snake_case=2,
        _snake_case=3,
        _snake_case=True,
        **_snake_case,
    ):
        super().__init__(pad_token_id=_snake_case, bos_token_id=_snake_case, eos_token_id=_snake_case, **_snake_case)
        _lowerCAmelCase: Optional[int] = vocab_size
        _lowerCAmelCase: int = hidden_size
        _lowerCAmelCase: Any = num_hidden_layers
        _lowerCAmelCase: Dict = num_attention_heads
        _lowerCAmelCase: str = hidden_act
        _lowerCAmelCase: List[str] = intermediate_size
        _lowerCAmelCase: Optional[Any] = hidden_dropout_prob
        _lowerCAmelCase: Dict = attention_probs_dropout_prob
        _lowerCAmelCase: str = max_position_embeddings
        _lowerCAmelCase: List[Any] = max_relative_position
        _lowerCAmelCase: Optional[Any] = type_vocab_size
        _lowerCAmelCase: Tuple = initializer_range
        _lowerCAmelCase: Optional[Any] = layer_norm_eps
        _lowerCAmelCase: List[Any] = classifier_dropout
        _lowerCAmelCase: Optional[Any] = use_cache
```
`code_codestyle`: 703
`style_context`:

```python
from __future__ import annotations

from typing import Generic, TypeVar

snake_case = TypeVar("T")


class __A(Generic[T]):
    '''simple docstring'''

    def __init__(self, _snake_case):
        _lowerCAmelCase: List[Any] = data
        _lowerCAmelCase: Dict = self
        _lowerCAmelCase: Tuple = 0


class __A(Generic[T]):
    '''simple docstring'''

    def __init__(self):
        # map from node name to the node object
        _lowerCAmelCase: dict[T, DisjointSetTreeNode[T]] = {}

    def SCREAMING_SNAKE_CASE__(self, _snake_case):
        # create a new set with x as its member
        _lowerCAmelCase: List[str] = DisjointSetTreeNode(_snake_case)

    def SCREAMING_SNAKE_CASE__(self, _snake_case):
        # find the set x belongs to (with path-compression)
        _lowerCAmelCase: Dict = self.map[data]
        if elem_ref != elem_ref.parent:
            _lowerCAmelCase: Tuple = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def SCREAMING_SNAKE_CASE__(self, _snake_case, _snake_case):
        # helper function for union operation
        if nodea.rank > nodea.rank:
            _lowerCAmelCase: int = nodea
        else:
            _lowerCAmelCase: Optional[Any] = nodea
            if nodea.rank == nodea.rank:
                nodea.rank += 1

    def SCREAMING_SNAKE_CASE__(self, _snake_case, _snake_case):
        # merge 2 disjoint sets
        self.link(self.find_set(_snake_case), self.find_set(_snake_case))


class __A(Generic[T]):
    '''simple docstring'''

    def __init__(self):
        # connections: map from the node to the neighbouring nodes (with weights)
        _lowerCAmelCase: dict[T, dict[T, int]] = {}

    def SCREAMING_SNAKE_CASE__(self, _snake_case):
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            _lowerCAmelCase: Any = {}

    def SCREAMING_SNAKE_CASE__(self, _snake_case, _snake_case, _snake_case):
        # add an edge with the given weight
        self.add_node(_snake_case)
        self.add_node(_snake_case)
        _lowerCAmelCase: int = weight
        _lowerCAmelCase: Optional[int] = weight

    def SCREAMING_SNAKE_CASE__(self):
        _lowerCAmelCase: Tuple = []
        _lowerCAmelCase: Optional[Any] = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda _snake_case: x[2])

        # creating the disjoint set
        _lowerCAmelCase: Tuple = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(_snake_case)

        # MST generation
        _lowerCAmelCase: Optional[Any] = 0
        _lowerCAmelCase: Dict = 0
        _lowerCAmelCase: int = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase: Union[str, Any] = edges[index]
            index += 1
            _lowerCAmelCase: Dict = disjoint_set.find_set(_snake_case)
            _lowerCAmelCase: List[str] = disjoint_set.find_set(_snake_case)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(_snake_case, _snake_case, _snake_case)
                disjoint_set.union(_snake_case, _snake_case)
        return graph
```
`style_context_codestyle`: 587
`label`: 0
Row 7

`code`:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_snake_case = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ['PLBartTokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    import sys

    _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure)
```
`code_codestyle`: 245
`style_context`:

```python
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import floataa
from numpy.typing import NDArray


def _A(snake_case, snake_case, snake_case, snake_case) -> list[float]:
    _lowercase, _lowercase: Union[str, Any] = coefficient_matrix.shape
    _lowercase, _lowercase: Optional[Any] = constant_matrix.shape

    if rowsa != colsa:
        _lowercase: Any = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
        raise ValueError(snake_case)

    if colsa != 1:
        _lowercase: Dict = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
        raise ValueError(snake_case)

    if rowsa != rowsa:
        _lowercase: int = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            F'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
        )
        raise ValueError(snake_case)

    if len(snake_case) != rowsa:
        _lowercase: Tuple = (
            "Number of initial values must be equal to number of rows in coefficient "
            F'''matrix but received {len(snake_case)} and {rowsa}'''
        )
        raise ValueError(snake_case)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    _lowercase: NDArray[floataa] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    _lowercase, _lowercase: Dict = table.shape

    strictly_diagonally_dominant(snake_case)

    # Iterates the whole matrix for given number of times
    for _ in range(snake_case):
        _lowercase: int = []
        for row in range(snake_case):
            _lowercase: Tuple = 0
            for col in range(snake_case):
                if col == row:
                    _lowercase: str = table[row][col]
                elif col == cols - 1:
                    _lowercase: List[str] = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            _lowercase: List[str] = (temp + val) / denom
            new_val.append(snake_case)
        _lowercase: str = new_val

    return [float(snake_case) for i in new_val]


def _A(snake_case) -> bool:
    _lowercase, _lowercase: Optional[int] = table.shape
    _lowercase: Optional[Any] = True

    for i in range(0, snake_case):
        _lowercase: Dict = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
`style_context_codestyle`: 245
`label`: 1
Row 8

`code`:

```python
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

_snake_case = logging.get_logger(__name__)


@add_end_docstrings(__lowerCamelCase)
class lowerCAmelCase__(__lowerCamelCase):
    """simple docstring"""

    def __init__(self, *a_, **a_):
        super().__init__(*a_, **a_)
        requires_backends(self, "decord")
        self.check_model_type(a_)

    def _UpperCamelCase(self, a_=None, a_=None, a_=None):
        lowerCamelCase_: Dict = {}
        if frame_sampling_rate is not None:
            lowerCamelCase_: Dict = frame_sampling_rate
        if num_frames is not None:
            lowerCamelCase_: Any = num_frames

        lowerCamelCase_: Optional[int] = {}
        if top_k is not None:
            lowerCamelCase_: Optional[Any] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, a_, **a_):
        return super().__call__(a_, **a_)

    def _UpperCamelCase(self, a_, a_=None, a_=1):
        if num_frames is None:
            lowerCamelCase_: str = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            lowerCamelCase_: int = BytesIO(requests.get(a_).content)

        lowerCamelCase_: Optional[int] = VideoReader(a_)
        videoreader.seek(0)

        lowerCamelCase_: List[str] = 0
        lowerCamelCase_: str = num_frames * frame_sampling_rate - 1
        lowerCamelCase_: str = np.linspace(a_, a_, num=a_, dtype=np.intaa)

        lowerCamelCase_: List[str] = videoreader.get_batch(a_).asnumpy()
        lowerCamelCase_: int = list(a_)

        lowerCamelCase_: Union[str, Any] = self.image_processor(a_, return_tensors=self.framework)
        return model_inputs

    def _UpperCamelCase(self, a_):
        lowerCamelCase_: int = self.model(**a_)
        return model_outputs

    def _UpperCamelCase(self, a_, a_=5):
        if top_k > self.model.config.num_labels:
            lowerCamelCase_: int = self.model.config.num_labels

        if self.framework == "pt":
            lowerCamelCase_: Tuple = model_outputs.logits.softmax(-1)[0]
            lowerCamelCase_: Tuple = probs.topk(a_)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")

        lowerCamelCase_: Tuple = scores.tolist()
        lowerCamelCase_: Union[str, Any] = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_, a_)]
```
`code_codestyle`: 714
`style_context`:

```python
def __magic_name__(lowerCAmelCase_, lowerCAmelCase_):
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(lowerCAmelCase_), lowerCAmelCase_)
    return number - int(lowerCAmelCase_)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.3_45, 1))
    print(decimal_isolate(35.3_45, 2))
    print(decimal_isolate(35.3_45, 3))
    print(decimal_isolate(-14.7_89, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.1_23, 1))
    print(decimal_isolate(-14.1_23, 2))
    print(decimal_isolate(-14.1_23, 3))
```
73
0
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        # The two "a"-only files share the same token set, so they cluster together
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
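For intuition, a rough sketch of the MinHash similarity these tests exercise, assuming a `datasketch`-style implementation underneath (the tested module itself is not shown here):

from datasketch import MinHash

def minhash_of(text: str, num_perm: int = 128) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf-8"))
    return m

a = minhash_of("a " * 20)
b = minhash_of("a " * 30)
print(a.jaccard(b))  # ~1.0: identical token sets, so the two files cluster as duplicates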
315
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _a ( UpperCAmelCase ) -> Tuple: """simple docstring""" lowerCamelCase__ : Union[str, Any] = SwinConfig(image_size=192 ) if "base" in model_name: lowerCamelCase__ : List[str] = 6 lowerCamelCase__ : Any = 128 lowerCamelCase__ : Tuple = (2, 2, 18, 2) lowerCamelCase__ : int = (4, 8, 16, 32) elif "large" in model_name: lowerCamelCase__ : Any = 12 lowerCamelCase__ : List[Any] = 192 lowerCamelCase__ : Any = (2, 2, 18, 2) lowerCamelCase__ : Optional[int] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) lowerCamelCase__ : List[str] = window_size lowerCamelCase__ : Optional[int] = embed_dim lowerCamelCase__ : Optional[int] = depths lowerCamelCase__ : int = num_heads return config def _a ( UpperCAmelCase ) -> Any: """simple docstring""" if "encoder.mask_token" in name: lowerCamelCase__ : str = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: lowerCamelCase__ : int = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: lowerCamelCase__ : Optional[int] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: lowerCamelCase__ : int = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCamelCase__ : Optional[Any] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCamelCase__ : int = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCamelCase__ : Optional[Any] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCamelCase__ : Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCamelCase__ : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": lowerCamelCase__ : Union[str, Any] = '''layernorm.weight''' if name == "encoder.norm.bias": lowerCamelCase__ : List[Any] = '''layernorm.bias''' if "decoder" in name: pass else: lowerCamelCase__ : List[str] = '''swin.''' + name return name def _a ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Optional[Any] = orig_state_dict.pop(UpperCAmelCase ) if "attn_mask" in key: pass elif "qkv" in key: lowerCamelCase__ : str = key.split('''.''' ) lowerCamelCase__ : Tuple = int(key_split[2] ) lowerCamelCase__ : Any = int(key_split[4] ) lowerCamelCase__ : int = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCamelCase__ : Optional[Any] = val[:dim, :] lowerCamelCase__ : Union[str, Any] = val[ dim : dim * 2, : ] lowerCamelCase__ : List[str] = val[-dim:, :] else: lowerCamelCase__ : Tuple = val[ :dim ] lowerCamelCase__ : List[Any] = val[ dim : dim * 2 ] lowerCamelCase__ : Tuple = val[ -dim: ] else: lowerCamelCase__ : Any = val return orig_state_dict def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: """simple docstring""" lowerCamelCase__ : int = torch.load(UpperCAmelCase , map_location='''cpu''' )['''model'''] lowerCamelCase__ : Dict = get_swin_config(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = SwinForMaskedImageModeling(UpperCAmelCase ) model.eval() lowerCamelCase__ : Optional[int] = 
convert_state_dict(UpperCAmelCase , UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) lowerCamelCase__ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase__ : Any = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) lowerCamelCase__ : str = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) lowerCamelCase__ : Any = image_processor(images=UpperCAmelCase , return_tensors='''pt''' ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(**UpperCAmelCase ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(f"Pushing model and image processor for {model_name} to hub" ) model.push_to_hub(f"microsoft/{model_name}" ) image_processor.push_to_hub(f"microsoft/{model_name}" ) if __name__ == "__main__": _A : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _A : List[str] = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
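The qkv handling in the conversion above slices a fused attention projection into separate query/key/value tensors. A self-contained illustration of that slicing (names and sizes are illustrative):

import torch

dim = 4
fused_qkv = torch.randn(3 * dim, dim)  # fused projection of shape (3*dim, dim)
q_weight = fused_qkv[:dim, :]
k_weight = fused_qkv[dim : dim * 2, :]
v_weight = fused_qkv[-dim:, :]
# Re-concatenating the slices recovers the original fused weight
assert torch.equal(torch.cat([q_weight, k_weight, v_weight]), fused_qkv)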
315
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
715
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
94
0
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def __snake_case ( SCREAMING_SNAKE_CASE: Features ): """simple docstring""" _lowerCAmelCase = np.inf def set_batch_size(SCREAMING_SNAKE_CASE: FeatureType ) -> None: nonlocal batch_size if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowerCAmelCase = min(SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowerCAmelCase = min(SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _lowerCAmelCase = min(SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : NestedDataStructureLike[PathLike] , UpperCAmelCase_ : Optional[NamedSplit] = None , UpperCAmelCase_ : Optional[Features] = None , UpperCAmelCase_ : str = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , **UpperCAmelCase_ : List[Any] , ) -> Tuple: """simple docstring""" super().__init__( UpperCAmelCase_ , split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , num_proc=UpperCAmelCase_ , **UpperCAmelCase_ , ) _lowerCAmelCase = path_or_paths if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else {self.split: path_or_paths} _lowerCAmelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] _lowerCAmelCase = Parquet( cache_dir=UpperCAmelCase_ , data_files=UpperCAmelCase_ , features=UpperCAmelCase_ , hash=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __lowerCamelCase ( self : int ) -> List[str]: """simple docstring""" if self.streaming: _lowerCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None self.builder.download_and_prepare( download_config=UpperCAmelCase_ , download_mode=UpperCAmelCase_ , verification_mode=UpperCAmelCase_ , base_path=UpperCAmelCase_ , num_proc=self.num_proc , ) _lowerCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=UpperCAmelCase_ , in_memory=self.keep_in_memory ) return dataset class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Dataset , UpperCAmelCase_ : Union[PathLike, BinaryIO] , UpperCAmelCase_ : Optional[int] = None , **UpperCAmelCase_ : List[str] , ) -> Optional[int]: """simple docstring""" _lowerCAmelCase = dataset _lowerCAmelCase = path_or_buf _lowerCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _lowerCAmelCase = parquet_writer_kwargs def __lowerCamelCase ( self : List[Any] ) -> int: """simple docstring""" _lowerCAmelCase = self.batch_size if self.batch_size else 
config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: _lowerCAmelCase = self._write(file_obj=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **self.parquet_writer_kwargs ) else: _lowerCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase_ , **self.parquet_writer_kwargs ) return written def __lowerCamelCase ( self : List[Any] , UpperCAmelCase_ : BinaryIO , UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ) -> int: """simple docstring""" _lowerCAmelCase = 0 _lowerCAmelCase = parquet_writer_kwargs.pop('path_or_buf' , UpperCAmelCase_ ) _lowerCAmelCase = self.dataset.features.arrow_schema _lowerCAmelCase = pq.ParquetWriter(UpperCAmelCase_ , schema=UpperCAmelCase_ , **UpperCAmelCase_ ) for offset in logging.tqdm( range(0 , len(self.dataset ) , UpperCAmelCase_ ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): _lowerCAmelCase = query_table( table=self.dataset._data , key=slice(UpperCAmelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(UpperCAmelCase_ ) written += batch.nbytes writer.close() return written
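These reader/writer classes back the public `Dataset.from_parquet` / `Dataset.to_parquet` helpers; a minimal round-trip sketch (the file path is illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"]})
ds.to_parquet("tmp.parquet")               # delegates to the writer class above
ds2 = Dataset.from_parquet("tmp.parquet")  # delegates to the reader class above
assert ds2["text"] == ["a", "b", "c"]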
580
"""simple docstring""" from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _snake_case = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' _snake_case = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' _snake_case = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> 
super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def __snake_case ( SCREAMING_SNAKE_CASE: List[str] , SCREAMING_SNAKE_CASE: Dict ): """simple docstring""" return float((preds == labels).mean() ) def __snake_case ( SCREAMING_SNAKE_CASE: Dict , SCREAMING_SNAKE_CASE: str , SCREAMING_SNAKE_CASE: Union[str, Any]="binary" ): """simple docstring""" _lowerCAmelCase = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowerCAmelCase = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE ) ) return { "accuracy": acc, "f1": fa, } def __snake_case ( SCREAMING_SNAKE_CASE: Any , SCREAMING_SNAKE_CASE: List[Any] ): """simple docstring""" _lowerCAmelCase = {} for id_pred, label in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowerCAmelCase = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" _lowerCAmelCase = id_pred['prediction'] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _lowerCAmelCase = [(pred, label)] _lowerCAmelCase , _lowerCAmelCase = [], [] for question, preds_labels in question_map.items(): _lowerCAmelCase , _lowerCAmelCase = zip(*SCREAMING_SNAKE_CASE ) _lowerCAmelCase = fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average='macro' ) fas.append(SCREAMING_SNAKE_CASE ) _lowerCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(SCREAMING_SNAKE_CASE ) ) ems.append(SCREAMING_SNAKE_CASE ) _lowerCAmelCase = float(sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) _lowerCAmelCase = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def __lowerCamelCase ( self : Tuple ) -> List[str]: """simple docstring""" if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , ) def __lowerCamelCase ( self : int ) -> Union[str, Any]: """simple docstring""" if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "prediction_text": datasets.Value('string' ), }, "references": { "idx": { "passage": datasets.Value('int64' ), "query": datasets.Value('int64' ), }, "answers": datasets.Sequence(datasets.Value('string' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('int64' ), "paragraph": datasets.Value('int64' ), "question": datasets.Value('int64' ), }, "prediction": datasets.Value('int64' ), }, "references": datasets.Value('int64' 
), } else: return { "predictions": datasets.Value('int64' ), "references": datasets.Value('int64' ), } def __lowerCamelCase ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ) -> Dict: """simple docstring""" if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase_ , UpperCAmelCase_ )} elif self.config_name == "cb": return acc_and_fa(UpperCAmelCase_ , UpperCAmelCase_ , fa_avg='macro' ) elif self.config_name == "record": _lowerCAmelCase = [ { 'qas': [ {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]} for ref in references ] } ] _lowerCAmelCase = {pred['idx']['query']: pred['prediction_text'] for pred in predictions} return evaluate_record(UpperCAmelCase_ , UpperCAmelCase_ )[0] elif self.config_name == "multirc": return evaluate_multirc(UpperCAmelCase_ , UpperCAmelCase_ ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
580
1
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
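A trivial filter satisfying the `process` protocol above is enough to sanity-check both plots; a sketch assuming the names as restored in this rewrite:

class PassThroughFilter:
    def process(self, sample: float) -> float:
        return sample  # identity: impulse response is [1, 0, 0, ...]

show_frequency_response(PassThroughFilter(), 48000)  # flat 0 dB line
show_phase_response(PassThroughFilter(), 48000)      # zero phase shift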
683
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
683
1
"""simple docstring""" A_ = """Alexander Joslin""" import operator as op from .stack import Stack def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} lowerCamelCase_ = Stack() lowerCamelCase_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(lowerCAmelCase__ ) ) elif i in operators: # RULE 2 operator_stack.push(lowerCAmelCase__ ) elif i == ")": # RULE 4 lowerCamelCase_ = operator_stack.peek() operator_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operand_stack.peek() operand_stack.pop() lowerCamelCase_ = operators[opr](lowerCAmelCase__ ,lowerCAmelCase__ ) operand_stack.push(lowerCAmelCase__ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": A_ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
29
"""simple docstring""" from jiwer import compute_measures import datasets A_ = """\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } """ A_ = """\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. """ A_ = """ Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. 
Returns: (float): the word error rate Examples: >>> predictions = [\"this is the prediction\", \"there is an other sample\"] >>> references = [\"this is the reference\", \"there is another one\"] >>> wer = datasets.load_metric(\"wer\") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): def UpperCAmelCase__ ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', ] , ) def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ): if concatenate_texts: return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"] else: lowerCamelCase_ = 0 lowerCamelCase_ = 0 for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ): lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
29
1
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    Decide whether some subset of `arr` sums to `required_sum`, via the
    classic O(len(arr) * required_sum) dynamic program.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
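The same decision problem is often written with a one-dimensional table; a sketch of that space optimization (not part of the original file):

def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True  # the empty subset always sums to 0
    for value in arr:
        # iterate downward so each element is used at most once
        for j in range(required_sum, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    return reachable[required_sum]

assert is_sum_subset_1d([2, 4, 6, 8], 14) is True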
710
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast a_ = datasets.utils.logging.get_logger(__name__) @dataclass class _UpperCamelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCamelCase__ =10000 lowerCamelCase__ =None lowerCamelCase__ =None class _UpperCamelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCamelCase__ =ParquetConfig def __UpperCamelCase ( self : str ) -> str: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __UpperCamelCase ( self : Dict , a : List[Any] ) -> Tuple: """simple docstring""" if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) SCREAMING_SNAKE_CASE : Tuple = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a , (str, list, tuple) ): SCREAMING_SNAKE_CASE : Dict = data_files if isinstance(a , a ): SCREAMING_SNAKE_CASE : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE : Optional[Any] = [dl_manager.iter_files(a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] SCREAMING_SNAKE_CASE : str = [] for split_name, files in data_files.items(): if isinstance(a , a ): SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE : Tuple = [dl_manager.iter_files(a ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a ): with open(a , "rb" ) as f: SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(a ) ) break splits.append(datasets.SplitGenerator(name=a , gen_kwargs={"files": files} ) ) return splits def __UpperCamelCase ( self : Dict , a : pa.Table ) -> pa.Table: """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE : str = table_cast(a , self.info.features.arrow_schema ) return pa_table def __UpperCamelCase ( self : List[str] , a : Optional[Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a ) ): with open(a , "rb" ) as f: SCREAMING_SNAKE_CASE : Optional[int] = pq.ParquetFile(a ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): SCREAMING_SNAKE_CASE : int = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F"{file_idx}_{batch_idx}", self._cast_table(a ) except ValueError as e: logger.error(F"Failed to read 
file '{file}' with error {type(a )}: {e}" ) raise
193
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
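What the `_LazyModule` indirection above buys, sketched from the user's side; the module path assumes the usual transformers package layout:

import importlib

gpt_bigcode = importlib.import_module("transformers.models.gpt_bigcode")  # cheap: nothing heavy imported yet
config_cls = gpt_bigcode.GPTBigCodeConfig  # first attribute access triggers the real submodule import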
67
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def snake_case_ (__A : Tuple ) -> str: __lowerCAmelCase : List[str] = fname.split(os.path.sep )[-1] return re.search(r"""^(.*)_\d+\.jpg$""" , __A ).groups()[0] class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None ) -> int: """simple docstring""" __lowerCAmelCase : int = file_names __lowerCAmelCase : Dict = image_transform __lowerCAmelCase : int = label_to_id def __len__( self : Any ) -> Union[str, Any]: """simple docstring""" return len(self.file_names ) def __getitem__( self : Dict , lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : List[str] = self.file_names[idx] __lowerCAmelCase : Any = PIL.Image.open(lowerCAmelCase ) __lowerCAmelCase : List[Any] = raw_image.convert("""RGB""" ) if self.image_transform is not None: __lowerCAmelCase : Union[str, Any] = self.image_transform(lowerCAmelCase ) __lowerCAmelCase : int = extract_label(lowerCAmelCase ) if self.label_to_id is not None: __lowerCAmelCase : Union[str, Any] = self.label_to_id[label] return {"image": image, "label": label} def snake_case_ (__A : Optional[Any] , __A : Union[str, Any] ) -> Tuple: # Initialize accelerator if args.with_tracking: __lowerCAmelCase : Tuple = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: __lowerCAmelCase : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase : Union[str, Any] = config["""lr"""] __lowerCAmelCase : List[str] = int(config["""num_epochs"""] ) __lowerCAmelCase : List[Any] = int(config["""seed"""] ) __lowerCAmelCase : Optional[int] = int(config["""batch_size"""] ) __lowerCAmelCase : int = config["""image_size"""] if not isinstance(__A , (list, tuple) ): __lowerCAmelCase : Optional[Any] = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , """isdigit""" ): if args.checkpointing_steps == "epoch": __lowerCAmelCase : int = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): __lowerCAmelCase : List[str] = int(args.checkpointing_steps ) else: raise ValueError( f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' ) else: __lowerCAmelCase : List[str] = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: __lowerCAmelCase : Any = os.path.split(__A )[-1].split(""".""" )[0] accelerator.init_trackers(__A , __A ) # Grab all the image filenames __lowerCAmelCase : Optional[int] = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )] # Build the label correspondences __lowerCAmelCase : Tuple = [extract_label(__A ) for fname in file_names] __lowerCAmelCase : Union[str, Any] = list(set(__A ) ) id_to_label.sort() __lowerCAmelCase : Optional[Any] = {lbl: i for i, lbl in enumerate(__A )} # Set the seed before splitting the data. 
np.random.seed(__A ) torch.manual_seed(__A ) torch.cuda.manual_seed_all(__A ) # Split our filenames between train and validation __lowerCAmelCase : str = np.random.permutation(len(__A ) ) __lowerCAmelCase : Any = int(0.8 * len(__A ) ) __lowerCAmelCase : List[str] = random_perm[:cut] __lowerCAmelCase : Tuple = random_perm[cut:] # For training we use a simple RandomResizedCrop __lowerCAmelCase : Dict = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] ) __lowerCAmelCase : List[str] = PetsDataset( [file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A ) # For evaluation, we use a deterministic Resize __lowerCAmelCase : Optional[Any] = Compose([Resize(__A ), ToTensor()] ) __lowerCAmelCase : List[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A ) # Instantiate dataloaders. __lowerCAmelCase : Union[str, Any] = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 ) __lowerCAmelCase : Union[str, Any] = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase : List[Any] = create_model("""resnet50d""" , pretrained=__A , num_classes=len(__A ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase : Optional[int] = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): __lowerCAmelCase : Dict = False for param in model.get_classifier().parameters(): __lowerCAmelCase : Union[str, Any] = True # We normalize the batches of images to be a bit faster. __lowerCAmelCase : Tuple = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device ) __lowerCAmelCase : Union[str, Any] = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer __lowerCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 ) # Instantiate learning rate scheduler __lowerCAmelCase : List[Any] = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = accelerator.prepare( __A , __A , __A , __A , __A ) # We need to keep track of how many total steps we have iterated over __lowerCAmelCase : Tuple = 0 # We also need to keep track of the starting epoch so files are named properly __lowerCAmelCase : Union[str, Any] = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' ) accelerator.load_state(args.resume_from_checkpoint ) __lowerCAmelCase : Dict = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint __lowerCAmelCase : Tuple = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) __lowerCAmelCase : Union[str, Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` __lowerCAmelCase : Any = os.path.splitext(__A )[0] if "epoch" in training_difference: __lowerCAmelCase : List[str] = int(training_difference.replace("""epoch_""" , """""" ) ) + 1 __lowerCAmelCase : Any = None else: __lowerCAmelCase : Optional[int] = int(training_difference.replace("""step_""" , """""" ) ) __lowerCAmelCase : int = resume_step // len(__A ) resume_step -= starting_epoch * len(__A ) # Now we train the model for epoch in range(__A , __A ): model.train() if args.with_tracking: __lowerCAmelCase : Dict = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step __lowerCAmelCase : str = accelerator.skip_first_batches(__A , __A ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader __lowerCAmelCase : List[str] = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. __lowerCAmelCase : List[str] = {k: v.to(accelerator.device ) for k, v in batch.items()} __lowerCAmelCase : Tuple = (batch["""image"""] - mean) / std __lowerCAmelCase : List[Any] = model(__A ) __lowerCAmelCase : str = torch.nn.functional.cross_entropy(__A , batch["""label"""] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__A , __A ): __lowerCAmelCase : Union[str, Any] = f'''step_{overall_step}''' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: __lowerCAmelCase : Optional[Any] = os.path.join(args.output_dir , __A ) accelerator.save_state(__A ) model.eval() __lowerCAmelCase : str = 0 __lowerCAmelCase : Union[str, Any] = 0 for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
__lowerCAmelCase : Union[str, Any] = {k: v.to(accelerator.device ) for k, v in batch.items()} __lowerCAmelCase : Optional[int] = (batch["""image"""] - mean) / std with torch.no_grad(): __lowerCAmelCase : List[Any] = model(__A ) __lowerCAmelCase : Optional[Any] = outputs.argmax(dim=-1 ) __lowerCAmelCase ,__lowerCAmelCase : Any = accelerator.gather_for_metrics((predictions, batch["""label"""]) ) __lowerCAmelCase : int = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() __lowerCAmelCase : Optional[int] = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}: {1_0_0 * eval_metric:.2f}''' ) if args.with_tracking: accelerator.log( { """accuracy""": 1_0_0 * eval_metric, """train_loss""": total_loss.item() / len(__A ), """epoch""": epoch, } , step=__A , ) if checkpointing_steps == "epoch": __lowerCAmelCase : Union[str, Any] = f'''epoch_{epoch}''' if args.output_dir is not None: __lowerCAmelCase : Any = os.path.join(args.output_dir , __A ) accelerator.save_state(__A ) if args.with_tracking: accelerator.end_training() def snake_case_ () -> Any: __lowerCAmelCase : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument("""--data_dir""" , required=__A , help="""The data folder on disk.""" ) parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" ) parser.add_argument( """--mixed_precision""" , type=__A , default=__A , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--checkpointing_steps""" , type=__A , default=__A , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , ) parser.add_argument( """--output_dir""" , type=__A , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=__A , default=__A , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__A , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) __lowerCAmelCase : int = parser.parse_args() __lowerCAmelCase : Tuple = {"""lr""": 3e-2, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 6_4, """image_size""": 2_2_4} training_function(__A , __A ) if __name__ == "__main__": main()
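The training script above follows the standard Accelerate recipe; distilled to its core it is a sketch like the following, with all arguments as placeholders rather than the script's actual objects:

import torch
from accelerate import Accelerator

def train(model: torch.nn.Module, optimizer: torch.optim.Optimizer, dataloader, epochs: int = 1):
    accelerator = Accelerator()
    # prepare() wraps everything for the current device, DDP, and mixed precision
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    model.train()
    for _ in range(epochs):
        for inputs, targets in dataloader:
            optimizer.zero_grad()
            loss = torch.nn.functional.cross_entropy(model(inputs), targets)
            accelerator.backward(loss)  # replaces loss.backward()
            optimizer.step()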
651
0
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness A_ : Optional[Any] ="""\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ A_ : int ="""\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ A_ : List[str] =""" Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ A_ : Union[str, Any] =""" ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ A_ : Optional[int] ="""The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a ( datasets.Metric ): def snake_case_ ( self ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def snake_case_ ( self , a__ , a__ , a__=[1, 10, 1_00] , a__=4 , a__=3.0 ): if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.' 
) with ThreadPoolExecutor(max_workers=__a ) as executor: _lowerCamelCase = [] _lowerCamelCase = Counter() _lowerCamelCase = 0 _lowerCamelCase = defaultdict(__a ) for task_id, (candidates, test_case) in enumerate(zip(__a , __a ) ): for candidate in candidates: _lowerCamelCase = candidate + '\n' + test_case _lowerCamelCase = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase = executor.submit(__a , *__a ) futures.append(__a ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__a ): _lowerCamelCase = future.result() results[result["task_id"]].append((result['completion_id'], result) ) _lowerCamelCase , _lowerCamelCase = [], [] for result in results.values(): result.sort() _lowerCamelCase = [r[1]['passed'] for r in result] total.append(len(__a ) ) correct.append(sum(__a ) ) _lowerCamelCase = np.array(__a ) _lowerCamelCase = np.array(__a ) _lowerCamelCase = k _lowerCamelCase = {F'pass@{k}': estimate_pass_at_k(__a , __a , __a ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, Any] , snake_case : str , snake_case : Optional[int] )-> Dict: def estimator(snake_case : List[Any] , snake_case : int , snake_case : List[Any] ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
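A numeric sanity check of the unbiased pass@k estimator defined at the bottom of the metric above, pass@k = 1 - C(n-c, k) / C(n, k), computed stably as a product:

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # n = total samples, c = correct samples, k = draw size
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

print(pass_at_k(n=2, c=1, k=1))  # 0.5: one of two samples passes
print(pass_at_k(n=2, c=1, k=2))  # 1.0: any 2-of-2 draw contains the passing sample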
712
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Optional[Any] =logging.get_logger(__name__) A_ : Optional[Any] ={ """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __a ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE__ : int = "speech_to_text_2" SCREAMING_SNAKE_CASE__ : int = ["past_key_values"] SCREAMING_SNAKE_CASE__ : Any = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self , a__=1_00_00 , a__=6 , a__=20_48 , a__=4 , a__=0.0 , a__=True , a__="relu" , a__=2_56 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.02 , a__=2 , a__=True , a__=1 , a__=0 , a__=2 , a__=10_24 , **a__ , ): _lowerCamelCase = vocab_size _lowerCamelCase = d_model _lowerCamelCase = decoder_ffn_dim _lowerCamelCase = decoder_layers _lowerCamelCase = decoder_attention_heads _lowerCamelCase = dropout _lowerCamelCase = attention_dropout _lowerCamelCase = activation_dropout _lowerCamelCase = activation_function _lowerCamelCase = init_std _lowerCamelCase = decoder_layerdrop _lowerCamelCase = use_cache _lowerCamelCase = decoder_layers _lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCamelCase = max_target_positions super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , decoder_start_token_id=a__ , **a__ , )
222
0
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _lowercase = '''__DUMMY_TRANSFORMERS_USER__''' _lowercase = '''Dummy User''' _lowercase = '''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''' _lowercase = '''https://hub-ci.huggingface.co''' _lowercase = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' _lowercase = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' _lowercase = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def _snake_case ( snake_case__ : List[Any] ): monkeypatch.setattr( 'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , snake_case__ ) @pytest.fixture def _snake_case ( snake_case__ : int ): monkeypatch.setattr('datasets.config.HF_ENDPOINT' , snake_case__ ) monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , snake_case__ ) @pytest.fixture def _snake_case ( snake_case__ : List[Any] ): monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , snake_case__ ) @pytest.fixture def _snake_case ( snake_case__ : Tuple , snake_case__ : Any ): HfFolder.save_token(snake_case__ ) yield HfFolder.delete_token() @pytest.fixture(scope='session' ) def _snake_case ( ): return HfApi(endpoint=snake_case__ ) @pytest.fixture(scope='session' ) def _snake_case ( snake_case__ : HfApi ): A = HfFolder.get_token() HfFolder.save_token(snake_case__ ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(snake_case__ ) @pytest.fixture def _snake_case ( snake_case__ : Optional[int] ): def _cleanup_repo(snake_case__ : Dict ): hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' ) return _cleanup_repo @pytest.fixture def _snake_case ( snake_case__ : Optional[Any] ): @contextmanager def _temporary_repo(snake_case__ : Dict ): try: yield repo_id finally: cleanup_repo(snake_case__ ) return _temporary_repo @pytest.fixture(scope='session' ) def _snake_case ( snake_case__ : HfApi , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ): A = F'repo_txt_data-{int(time.time() * 10e3 )}' A = F'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(snake_case__ , token=snake_case__ , repo_type='dataset' , private=snake_case__ ) hf_api.upload_file( token=snake_case__ , path_or_fileobj=str(snake_case__ ) , path_in_repo='data/text_data.txt' , repo_id=snake_case__ , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _snake_case ( snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict ): return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope='session' ) def _snake_case ( snake_case__ : HfApi , snake_case__ : Any , snake_case__ : str ): A = F'repo_zipped_txt_data-{int(time.time() * 10e3 )}' A = F'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(snake_case__ , token=snake_case__ , repo_type='dataset' , private=snake_case__ ) hf_api.upload_file( token=snake_case__ , path_or_fileobj=str(snake_case__ ) , path_in_repo='data.zip' , repo_id=snake_case__ , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Dict ): return 
hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope='session' ) def _snake_case ( snake_case__ : HfApi , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): A = F'repo_zipped_img_data-{int(time.time() * 10e3 )}' A = F'{CI_HUB_USER}/{repo_name}' hf_api.create_repo(snake_case__ , token=snake_case__ , repo_type='dataset' , private=snake_case__ ) hf_api.upload_file( token=snake_case__ , path_or_fileobj=str(snake_case__ ) , path_in_repo='data.zip' , repo_id=snake_case__ , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _snake_case ( snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] ): return hf_private_dataset_repo_zipped_img_data_
91
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, 
*report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples\'' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
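The variation expansion at the heart of `main()` above is just a cartesian product over the `|`-separated dimensions; a minimal standalone sketch (the two dimensions mirror the ones in the header comment):

import itertools

dims = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
dimensions = [[v.strip() for v in d.split("|")] for d in dims]
variations = [" ".join(combo).strip() for combo in itertools.product(*dimensions)]
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']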
91
1
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _UpperCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self ): # A mock response for an HTTP head request to emulate server down A_ : Tuple = mock.Mock() A_ : List[str] = 500 A_ : Any = {} A_ : Union[str, Any] = HTTPError A_ : Tuple = {} # Download this model to make sure it's in the cache. A_ : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=a__ ) as mock_head: A_ : Dict = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def _lowerCamelCase ( self ): # A mock response for an HTTP head request to emulate server down A_ : Union[str, Any] = mock.Mock() A_ : int = 500 A_ : Any = {} A_ : Dict = HTTPError A_ : List[Any] = {} # Download this model to make sure it's in the cache. A_ : Optional[int] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=a__ ) as mock_head: A_ : Any = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def _lowerCamelCase ( self ): # This test is for deprecated behavior and can be removed in v5 try: A_ : Union[str, Any] = tempfile.mktemp() with open(a__ , """wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , a__ ) A_ : Tuple = AlbertTokenizer.from_pretrained(a__ ) finally: os.remove(a__ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" , """wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , a__ ) A_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("""tokenizer.json""" ) def _lowerCamelCase ( self ): # This test is for deprecated behavior and can be removed in v5 A_ : Union[str, Any] = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class _UpperCAmelCase ( unittest.TestCase ): a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def _lowerCamelCase ( cls ): A_ : List[str] = TOKEN HfFolder.save_token(a__ ) @classmethod def _lowerCamelCase ( cls ): try: delete_repo(token=cls._token , repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def _lowerCamelCase ( self ): with tempfile.TemporaryDirectory() as tmp_dir: A_ : List[str] = os.path.join(a__ , """vocab.txt""" ) with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) A_ : str = BertTokenizer(a__ ) tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token ) A_ : Dict = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(a__ , repo_id="""test-tokenizer""" , push_to_hub=a__ , use_auth_token=self._token ) A_ : List[str] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def _lowerCamelCase ( self ): with tempfile.TemporaryDirectory() as tmp_dir: A_ : List[Any] = os.path.join(a__ , """vocab.txt""" ) with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) A_ : Any = BertTokenizer(a__ ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token ) A_ : Any = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( a__ , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=a__ , use_auth_token=self._token ) A_ : Union[str, Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def _lowerCamelCase ( self ): CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A_ : str = os.path.join(a__ , """vocab.txt""" ) with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) A_ : Optional[int] = CustomTokenizer(a__ ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=a__ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) # Fast and slow custom 
tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = os.path.join(a__ , """vocab.txt""" ) with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) A_ : Tuple = BertTokenizerFast.from_pretrained(a__ ) bert_tokenizer.save_pretrained(a__ ) A_ : List[Any] = CustomTokenizerFast.from_pretrained(a__ ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) A_ : List[Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=a__ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" ) A_ : List[str] = AutoTokenizer.from_pretrained( F"""{USER}/test-dynamic-tokenizer""" , use_fast=a__ , trust_remote_code=a__ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) class _UpperCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self ): A_ : int = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def _lowerCamelCase ( self ): A_ : List[str] = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] ) def _lowerCamelCase ( self ): A_ : Optional[Any] = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] ) def _lowerCamelCase ( self ): A_ : Any = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def _lowerCamelCase ( self ): A_ : List[Any] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def _lowerCamelCase ( self ): A_ : str = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] ) def _lowerCamelCase ( self ): A_ : Dict = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] ) def _lowerCamelCase ( self ): # Even if the offsets are wrong, we necessarily output correct string # parts. A_ : Optional[int] = Trie() A_ : Union[str, Any] = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(a__ , ["""AB""", """C"""] )
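The `Trie` exercised by the tests above is the greedy longest-match splitter `transformers` uses for added tokens; a minimal sketch with the real class, mirroring one of the assertions:

from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
# Longest registered match wins; unmatched spans are passed through untouched.
print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']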
713
def _lowerCAmelCase ( _lowerCAmelCase ): '''simple docstring''' A_ : Union[str, Any] = [0] * len(_lowerCAmelCase ) A_ : Optional[int] = [] A_ : str = [] A_ : Dict = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(_lowerCAmelCase ) ): if indegree[i] == 0: queue.append(_lowerCAmelCase ) while queue: A_ : List[str] = queue.pop(0 ) cnt += 1 topo.append(_lowerCAmelCase ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(_lowerCAmelCase ) if cnt != len(_lowerCAmelCase ): print("""Cycle exists""" ) else: print(_lowerCAmelCase ) # Adjacency List of Graph _lowerCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
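De-obfuscated, the routine above is Kahn's algorithm; a minimal runnable sketch on the same adjacency list:

from collections import deque

def topological_sort(graph):
    # Count incoming edges per vertex.
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    # Start from all vertices with no prerequisites (FIFO, as in the code above).
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for t in graph[v]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    if len(order) != len(graph):
        raise ValueError("Cycle exists")
    return order

print(topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))
# [0, 1, 2, 3, 4, 5]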
481
0
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="attention" ) -> List[str]: """simple docstring""" __snake_case = __snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __snake_case = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __snake_case = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __snake_case = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __snake_case = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Optional[int]: """simple docstring""" if split_mlp_wi: __snake_case = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __snake_case = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __snake_case = (wi_a, wi_a) else: __snake_case = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __snake_case = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def __UpperCamelCase ( SCREAMING_SNAKE_CASE , *, SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> int: """simple docstring""" __snake_case = traverse_util.flatten_dict(variables["target"] ) __snake_case = {"/".join(SCREAMING_SNAKE_CASE ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __snake_case = "encoder/encoder/mlp/wi_0/kernel" in old print("Split MLP:" , SCREAMING_SNAKE_CASE ) __snake_case = collections.OrderedDict() # Shared embeddings. __snake_case = old["token_embedder/embedding"] # Encoder. for i in range(SCREAMING_SNAKE_CASE ): # Block i, layer 0 (Self Attention). __snake_case = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "encoder" , "pre_attention_layer_norm" ) __snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "encoder" , "attention" ) __snake_case = layer_norm __snake_case = k.T __snake_case = o.T __snake_case = q.T __snake_case = v.T # Block i, layer 1 (MLP). 
__snake_case = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "encoder" , "pre_mlp_layer_norm" ) __snake_case , __snake_case = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "encoder" , SCREAMING_SNAKE_CASE ) __snake_case = layer_norm if split_mlp_wi: __snake_case = wi[0].T __snake_case = wi[1].T else: __snake_case = wi.T __snake_case = wo.T if scalable_attention: # convert the rel_embedding of each layer __snake_case = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "encoder" ).T __snake_case = old["encoder/encoder_norm/scale"] if not scalable_attention: __snake_case = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE , 0 , "encoder" ).T __snake_case = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE , 0 , "decoder" ).T if not is_encoder_only: # Decoder. for i in range(SCREAMING_SNAKE_CASE ): # Block i, layer 0 (Self Attention). __snake_case = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decoder" , "pre_self_attention_layer_norm" ) __snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decoder" , "self_attention" ) __snake_case = layer_norm __snake_case = k.T __snake_case = o.T __snake_case = q.T __snake_case = v.T # Block i, layer 1 (Cross Attention). __snake_case = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decoder" , "pre_cross_attention_layer_norm" ) __snake_case , __snake_case , __snake_case , __snake_case = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decoder" , "encoder_decoder_attention" ) __snake_case = layer_norm __snake_case = k.T __snake_case = o.T __snake_case = q.T __snake_case = v.T # Block i, layer 2 (MLP). __snake_case = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decoder" , "pre_mlp_layer_norm" ) __snake_case , __snake_case = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decoder" , SCREAMING_SNAKE_CASE ) __snake_case = layer_norm if split_mlp_wi: __snake_case = wi[0].T __snake_case = wi[1].T else: __snake_case = wi.T __snake_case = wo.T if scalable_attention: # convert the rel_embedding of each layer __snake_case = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "decoder" ).T __snake_case = old["decoder/decoder_norm/scale"] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __snake_case = old["decoder/logits_dense/kernel"].T return new def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" __snake_case = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __snake_case = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __snake_case = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) __snake_case = state_dict["shared.weight"] return state_dict def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" __snake_case = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE ) __snake_case = convert_tax_to_pytorch( SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE , scalable_attention=SCREAMING_SNAKE_CASE ) __snake_case = make_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: """simple docstring""" __snake_case = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __snake_case = UMTaEncoderModel(SCREAMING_SNAKE_CASE ) else: __snake_case = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tax_weights_in_ta(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE ) # Verify that we can load the checkpoint. model.from_pretrained(SCREAMING_SNAKE_CASE ) print("Done" ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
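The attention lookup in the converter above folds the per-head axes of a T5X kernel into a single matrix dimension before transposing; a minimal sketch of that reshape (the shapes are made up for illustration):

import numpy as np

k_tmp = np.zeros((512, 8, 64))  # (d_model, num_heads, head_dim) for one layer
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
assert k.shape == (512, 512)    # (d_model, num_heads * head_dim), stored transposed via .T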
163
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class __magic_name__ ( lowercase__ ): def __init__( self : str , *snake_case_ : Optional[Any] , **snake_case_ : int ): warnings.warn( "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use SegformerImageProcessor instead." , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
163
1
def snake_case_ ( snake_case , snake_case , snake_case , snake_case ) -> List[str]: global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: lowercase__: Tuple = mf_knapsack(i - 1 , snake_case , snake_case , snake_case ) else: lowercase__: Optional[Any] = max( mf_knapsack(i - 1 , snake_case , snake_case , snake_case ) , mf_knapsack(i - 1 , snake_case , snake_case , j - wt[i - 1] ) + val[i - 1] , ) lowercase__: Optional[Any] = val return f[i][j] def snake_case_ ( snake_case , snake_case , snake_case , snake_case ) -> Dict: lowercase__: Dict = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: lowercase__: Tuple = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: lowercase__: Optional[Any] = dp[i - 1][w_] return dp[n][w_], dp def snake_case_ ( snake_case , snake_case , snake_case ) -> str: if not (isinstance(snake_case , (list, tuple) ) and isinstance(snake_case , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) lowercase__: str = len(snake_case ) if num_items != len(snake_case ): lowercase__: List[str] = ( 'The number of weights must be the same as the number of values.\n' f'But got {num_items} weights and {len(snake_case )} values' ) raise ValueError(snake_case ) for i in range(snake_case ): if not isinstance(wt[i] , snake_case ): lowercase__: List[Any] = ( 'All weights must be integers but got weight of ' f'type {type(wt[i] )} at index {i}' ) raise TypeError(snake_case ) lowercase__ , lowercase__: Union[str, Any] = knapsack(snake_case , snake_case , snake_case , snake_case ) lowercase__: set = set() _construct_solution(snake_case , snake_case , snake_case , snake_case , snake_case ) return optimal_val, example_optional_set def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> str: # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(snake_case , snake_case , i - 1 , snake_case , snake_case ) else: optimal_set.add(snake_case ) _construct_solution(snake_case , snake_case , i - 1 , j - wt[i - 1] , snake_case ) if __name__ == "__main__": __lowerCAmelCase = [3, 2, 4, 4] __lowerCAmelCase = [4, 3, 2, 3] __lowerCAmelCase = 4 __lowerCAmelCase = 6 __lowerCAmelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __lowerCAmelCase ,__lowerCAmelCase = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __lowerCAmelCase ,__lowerCAmelCase = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('''optimal_value = ''', optimal_solution) print('''An optimal subset corresponding to the optimal value''', optimal_subset)
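The table-filling loop above is the classic 0/1 knapsack recurrence dp[i][w] = max(dp[i-1][w], dp[i-1][w-wt[i-1]] + val[i-1]); a minimal check against the assertions at the bottom of the file (which of the two lists is weights vs. values is inferred from the expected optimum of 8):

wt, val, capacity = [4, 3, 2, 3], [3, 2, 4, 4], 6
dp = [[0] * (capacity + 1) for _ in range(len(wt) + 1)]
for i in range(1, len(wt) + 1):
    for w in range(1, capacity + 1):
        dp[i][w] = dp[i - 1][w]  # skip item i
        if wt[i - 1] <= w:       # or take it, if it fits
            dp[i][w] = max(dp[i][w], dp[i - 1][w - wt[i - 1]] + val[i - 1])
# Items 3 and 4 (weights 2 and 3, values 4 and 4) give the optimum 8.
assert dp[len(wt)][capacity] == 8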
335
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __lowerCAmelCase = threading.Lock() __lowerCAmelCase = None __lowerCAmelCase = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } __lowerCAmelCase = logging.WARNING __lowerCAmelCase = True def snake_case_ ( ) -> Optional[Any]: lowercase__: Optional[int] = os.getenv('TRANSFORMERS_VERBOSITY' , snake_case ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ' f'has to be one of: { ", ".join(log_levels.keys() ) }' ) return _default_log_level def snake_case_ ( ) -> str: return __name__.split('.' )[0] def snake_case_ ( ) -> logging.Logger: return logging.getLogger(_get_library_name() ) def snake_case_ ( ) -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return lowercase__: str = logging.StreamHandler() # Set sys.stderr as stream. lowercase__: Optional[Any] = sys.stderr.flush # Apply our default configuration to the library root logger. lowercase__: Any = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) lowercase__: Union[str, Any] = False def snake_case_ ( ) -> None: global _default_handler with _lock: if not _default_handler: return lowercase__: Optional[Any] = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) lowercase__: List[str] = None def snake_case_ ( ) -> Union[str, Any]: return log_levels def snake_case_ ( snake_case = None ) -> logging.Logger: if name is None: lowercase__: Optional[Any] = _get_library_name() _configure_library_root_logger() return logging.getLogger(snake_case ) def snake_case_ ( ) -> int: _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def snake_case_ ( snake_case ) -> None: _configure_library_root_logger() _get_library_root_logger().setLevel(snake_case ) def snake_case_ ( ) -> List[str]: return set_verbosity(snake_case ) def snake_case_ ( ) -> List[Any]: return set_verbosity(snake_case ) def snake_case_ ( ) -> Union[str, Any]: return set_verbosity(snake_case ) def snake_case_ ( ) -> Any: return set_verbosity(snake_case ) def snake_case_ ( ) -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def snake_case_ ( ) -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def snake_case_ ( snake_case ) -> None: _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(snake_case ) def snake_case_ ( snake_case ) -> None: _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(snake_case ) def snake_case_ ( ) -> None: _configure_library_root_logger() lowercase__: Optional[Any] = False def snake_case_ ( ) -> None: _configure_library_root_logger() lowercase__: 
Optional[int] = True def snake_case_ ( ) -> None: lowercase__: str = _get_library_root_logger().handlers for handler in handlers: lowercase__: Any = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' ) handler.setFormatter(snake_case ) def snake_case_ ( ) -> None: lowercase__: Optional[int] = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(snake_case ) def snake_case_ ( self , *snake_case , **snake_case ) -> Union[str, Any]: lowercase__: Any = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , snake_case ) if no_advisory_warnings: return self.warning(*snake_case , **snake_case ) __lowerCAmelCase = warning_advice @functools.lru_cache(snake_case ) def snake_case_ ( self , *snake_case , **snake_case ) -> Any: self.warning(*snake_case , **snake_case ) __lowerCAmelCase = warning_once class __a : def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]: # pylint: disable=unused-argument '''simple docstring''' lowercase__: Union[str, Any] = args[0] if args else None def __iter__( self ) -> List[Any]: '''simple docstring''' return iter(self._iterator ) def __getattr__( self , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' def empty_fn(*lowerCAmelCase__ , **lowerCAmelCase__ ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> List[str]: '''simple docstring''' return self def __exit__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' return class __a : def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*lowerCAmelCase__ , **lowerCAmelCase__ ) else: return EmptyTqdm(*lowerCAmelCase__ , **lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]: '''simple docstring''' lowercase__: str = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCAmelCase__ , **lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() __lowerCAmelCase = _tqdm_cls() def snake_case_ ( ) -> bool: global _tqdm_active return bool(_tqdm_active ) def snake_case_ ( ) -> Union[str, Any]: global _tqdm_active lowercase__: List[str] = True hf_hub_utils.enable_progress_bars() def snake_case_ ( ) -> int: global _tqdm_active lowercase__: List[Any] = False hf_hub_utils.disable_progress_bars()
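Typical use of the verbosity helpers defined above, via the public `transformers` API (a minimal sketch):

from transformers.utils import logging

logging.set_verbosity_info()                 # library root logger now emits INFO
logger = logging.get_logger("transformers")  # child of the library root logger
logger.info("INFO messages from the library are now visible")
logging.set_verbosity_error()                # back to errors only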
335
1
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : str = 'https://openaipublic.azureedge.net/jukebox/models/' lowerCamelCase : Optional[Any] = { 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def _SCREAMING_SNAKE_CASE (A ) -> Optional[int]: """simple docstring""" if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: lowercase__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: lowercase__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: lowercase__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: lowercase__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: lowercase__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: lowercase__ = key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowercase__ = key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: lowercase__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def _SCREAMING_SNAKE_CASE (A , A , A , A ) -> Tuple: """simple docstring""" lowercase__ = {} import re lowercase__ = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowercase__ = re.compile( R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowercase__ = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowercase__ = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowercase__ = re.compile( R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowercase__ = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowercase__ = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) lowercase__ = re.compile( R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowercase__ = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(A ): lowercase__ = re_encoder_block_conv_in.match(A ) lowercase__ = regex_match.groups() lowercase__ = int(groups[2] ) * 2 + int(groups[3] ) lowercase__ = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}" lowercase__ = re_encoder_block_conv_in.sub(A , A ) elif re_encoder_block_resnet.fullmatch(A ): lowercase__ = re_encoder_block_resnet.match(A ) lowercase__ = regex_match.groups() lowercase__ = int(groups[2] ) * 2 + int(groups[3] ) lowercase__ = {'''1''': 1, '''3''': 2}[groups[-2]] lowercase__ = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}." 
lowercase__ = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" lowercase__ = prefix + resnet_block lowercase__ = re_encoder_block_resnet.sub(A , A ) elif re_encoder_block_proj_out.fullmatch(A ): lowercase__ = re_encoder_block_proj_out.match(A ) lowercase__ = regex_match.groups() lowercase__ = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}" lowercase__ = re_encoder_block_proj_out.sub(A , A ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(A ): lowercase__ = re_decoder_block_conv_out.match(A ) lowercase__ = regex_match.groups() lowercase__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase__ = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}" lowercase__ = re_decoder_block_conv_out.sub(A , A ) elif re_decoder_block_resnet.fullmatch(A ): lowercase__ = re_decoder_block_resnet.match(A ) lowercase__ = regex_match.groups() lowercase__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase__ = {'''1''': 1, '''3''': 2}[groups[-2]] lowercase__ = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}." lowercase__ = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" lowercase__ = prefix + resnet_block lowercase__ = re_decoder_block_resnet.sub(A , A ) elif re_decoder_block_proj_in.fullmatch(A ): lowercase__ = re_decoder_block_proj_in.match(A ) lowercase__ = regex_match.groups() lowercase__ = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}" lowercase__ = re_decoder_block_proj_in.sub(A , A ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(A ): lowercase__ = re_prior_cond_conv_out.match(A ) lowercase__ = regex_match.groups() lowercase__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase__ = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}" lowercase__ = re_prior_cond_conv_out.sub(A , A ) elif re_prior_cond_resnet.fullmatch(A ): lowercase__ = re_prior_cond_resnet.match(A ) lowercase__ = regex_match.groups() lowercase__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase__ = {'''1''': 1, '''3''': 2}[groups[-2]] lowercase__ = f"conditioner_blocks.upsampler.upsample_block.{block_index}." 
lowercase__ = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" lowercase__ = prefix + resnet_block lowercase__ = re_prior_cond_resnet.sub(A , A ) elif re_prior_cond_proj_in.fullmatch(A ): lowercase__ = re_prior_cond_proj_in.match(A ) lowercase__ = regex_match.groups() lowercase__ = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}" lowercase__ = re_prior_cond_proj_in.sub(A , A ) # keep original key else: lowercase__ = original_key lowercase__ = replace_key(A ) if f"{key_prefix}.{key}" not in model_state_dict or key is None: print(f"failed converting {original_key} to {key}, does not match" ) # handle missmatched shape elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape: lowercase__ = model_state_dict[f"{key_prefix}.{key}"] print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" ) lowercase__ = original_key lowercase__ = original_key lowercase__ = value return new_dict @torch.no_grad() def _SCREAMING_SNAKE_CASE (A=None , A=None ) -> Tuple: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ): lowercase__ = requests.get(f"{PREFIX}{file}" , allow_redirects=A ) os.makedirs(f"{pytorch_dump_folder_path}/" , exist_ok=A ) open(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , '''wb''' ).write(r.content ) lowercase__ = MODEL_MAPPING[model_name.split('''/''' )[-1]] lowercase__ = JukeboxConfig.from_pretrained(A ) lowercase__ = JukeboxModel(A ) lowercase__ = [] lowercase__ = {} for i, dict_name in enumerate(A ): lowercase__ = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['''model'''] lowercase__ = {} for k in old_dic.keys(): if k.endswith('''.b''' ): lowercase__ = old_dic[k] elif k.endswith('''.w''' ): lowercase__ = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowercase__ = old_dic[k] else: lowercase__ = old_dic[k] lowercase__ = '''vqvae''' if i == 0 else f"priors.{3 - i}" lowercase__ = fix_jukebox_keys(A , model.state_dict() , A , A ) weight_dict.append(A ) lowercase__ = weight_dict.pop(0 ) model.vqvae.load_state_dict(A ) for i in range(len(A ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(A ).mkdir(exist_ok=A ) with open(f"{pytorch_dump_folder_path}/mapping.json" , '''w''' ) as txtfile: json.dump(A , A ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(A ) return weight_dict if __name__ == "__main__": lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) lowerCamelCase : str = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
460
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _SCREAMING_SNAKE_CASE (A ) -> Dict: """simple docstring""" lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase__ = json.loads(open(A ).read() ) if not params: raise ValueError( f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." ) if not args.output.endswith('''.pt''' ): lowercase__ = args.output + '''.pt''' lowercase__ = OrderedDict() with tf.device('''/CPU:0''' ): lowercase__ = tf.train.load_checkpoint(args.tf_model_dir ) lowercase__ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase__ = reader.get_tensor(A ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase__ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase__ = 8 lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(A ) elif key_name.startswith('''model/moe''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(A ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(A ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase__ = key_name[-9:-7] for i in range(16 ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase__ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase__ = torch.tensor(A ) elif key_name.startswith('''model/mlp''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(A ) elif key_name.endswith('''/p1/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(A ) elif key_name.endswith('''/p2/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(A ) elif key_name.endswith('''/p2/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(A ) elif key_name.startswith('''model/ln''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(A ) elif key_name.endswith('''/g''' ): lowercase__ = 
'''model.blocks.%d.feed_forward.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(A ) elif key_name.startswith('''model/att''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase__ = state[:, 0, :, :] lowercase__ = state[:, 1, :, :] lowercase__ = state[:, 2, :, :] lowercase__ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase__ = torch.tensor(A ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase__ = torch.tensor(A ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase__ = torch.tensor(A ) elif key_name.endswith('''/o/kernel''' ): lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase__ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(A ) elif key_name.startswith('''model/an''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(A ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(A ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase__ = '''model.%s.weight''' % nlayer lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(A ) if key_name.startswith('''model/wte''' ): lowercase__ = '''lm_head.weight''' lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(A ) elif key_name.startswith('''model/wob''' ): lowercase__ = '''final_logits_bias''' lowercase__ = vnp.copy() # same in embedded lowercase__ = state.reshape((1, -1) ) lowercase__ = torch.tensor(A ) elif key_name == "model/dense/kernel": lowercase__ = '''model.last_project.weight''' lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(A ) elif key_name == "model/dense_1/bias": lowercase__ = '''model.last_project.bias''' lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(A ) torch.save(A , args.output ) if __name__ == "__main__": lowerCamelCase : Optional[int] = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output 
model') lowerCamelCase : Dict = parser.parse_args() convert_tf_gptsan_to_pt(args)
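A recurring move in the converter above is `vnp.transpose([1, 0]).copy()` before wrapping in `torch.tensor`: TensorFlow stores dense kernels as (in_features, out_features), while `torch.nn.Linear.weight` is (out_features, in_features). A small self-contained sketch with illustrative shapes:

import numpy as np
import torch

tf_kernel = np.random.randn(16, 8).astype(np.float32)  # (in_features, out_features)
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())  # (out_features, in_features)
assert pt_weight.shape == (8, 16)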
460
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def lowerCAmelCase ( UpperCAmelCase ) ->Dict: """simple docstring""" __magic_name__ : int = SwinvaConfig() __magic_name__ : List[Any] = swinva_name.split('''_''' ) __magic_name__ : Union[str, Any] = name_split[1] if "to" in name_split[3]: __magic_name__ : Union[str, Any] = int(name_split[3][-3:] ) else: __magic_name__ : Dict = int(name_split[3] ) if "to" in name_split[2]: __magic_name__ : Union[str, Any] = int(name_split[2][-2:] ) else: __magic_name__ : Optional[int] = int(name_split[2][6:] ) if model_size == "tiny": __magic_name__ : List[str] = 96 __magic_name__ : int = (2, 2, 6, 2) __magic_name__ : Optional[int] = (3, 6, 12, 24) elif model_size == "small": __magic_name__ : Dict = 96 __magic_name__ : Optional[Any] = (2, 2, 18, 2) __magic_name__ : int = (3, 6, 12, 24) elif model_size == "base": __magic_name__ : Tuple = 128 __magic_name__ : Any = (2, 2, 18, 2) __magic_name__ : Union[str, Any] = (4, 8, 16, 32) else: __magic_name__ : List[str] = 192 __magic_name__ : str = (2, 2, 18, 2) __magic_name__ : Any = (6, 12, 24, 48) if "to" in swinva_name: __magic_name__ : Optional[int] = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __magic_name__ : Tuple = 2_1841 __magic_name__ : List[Any] = '''huggingface/label-files''' __magic_name__ : List[str] = '''imagenet-22k-id2label.json''' __magic_name__ : Optional[int] = json.load(open(hf_hub_download(UpperCAmelCase, UpperCAmelCase, repo_type='''dataset''' ), '''r''' ) ) __magic_name__ : Optional[int] = {int(UpperCAmelCase ): v for k, v in idalabel.items()} __magic_name__ : List[str] = idalabel __magic_name__ : Any = {v: k for k, v in idalabel.items()} else: __magic_name__ : Optional[Any] = 1000 __magic_name__ : Union[str, Any] = '''huggingface/label-files''' __magic_name__ : List[str] = '''imagenet-1k-id2label.json''' __magic_name__ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase, UpperCAmelCase, repo_type='''dataset''' ), '''r''' ) ) __magic_name__ : Optional[Any] = {int(UpperCAmelCase ): v for k, v in idalabel.items()} __magic_name__ : List[str] = idalabel __magic_name__ : Any = {v: k for k, v in idalabel.items()} __magic_name__ : Optional[int] = img_size __magic_name__ : int = num_classes __magic_name__ : List[str] = embed_dim __magic_name__ : List[Any] = depths __magic_name__ : Any = num_heads __magic_name__ : Optional[int] = window_size return config def lowerCAmelCase ( UpperCAmelCase ) ->Dict: """simple docstring""" if "patch_embed.proj" in name: __magic_name__ : List[str] = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: __magic_name__ : Union[str, Any] = name.replace('''patch_embed.norm''', '''embeddings.norm''' ) if "layers" in name: __magic_name__ : List[str] = '''encoder.''' + name if "attn.proj" in name: __magic_name__ : List[str] = name.replace('''attn.proj''', '''attention.output.dense''' ) if "attn" in name: __magic_name__ : List[Any] = name.replace('''attn''', '''attention.self''' ) if "norm1" in name: __magic_name__ : int = name.replace('''norm1''', '''layernorm_before''' ) if "norm2" in name: __magic_name__ : Optional[Any] = name.replace('''norm2''', '''layernorm_after''' ) if "mlp.fc1" in name: __magic_name__ : Tuple = name.replace('''mlp.fc1''', '''intermediate.dense''' ) if "mlp.fc2" in 
name: __magic_name__ : List[Any] = name.replace('''mlp.fc2''', '''output.dense''' ) if "q_bias" in name: __magic_name__ : str = name.replace('''q_bias''', '''query.bias''' ) if "k_bias" in name: __magic_name__ : str = name.replace('''k_bias''', '''key.bias''' ) if "v_bias" in name: __magic_name__ : Tuple = name.replace('''v_bias''', '''value.bias''' ) if "cpb_mlp" in name: __magic_name__ : List[Any] = name.replace('''cpb_mlp''', '''continuous_position_bias_mlp''' ) if name == "norm.weight": __magic_name__ : Dict = '''layernorm.weight''' if name == "norm.bias": __magic_name__ : int = '''layernorm.bias''' if "head" in name: __magic_name__ : Optional[Any] = name.replace('''head''', '''classifier''' ) else: __magic_name__ : Optional[int] = '''swinv2.''' + name return name def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->int: """simple docstring""" for key in orig_state_dict.copy().keys(): __magic_name__ : int = orig_state_dict.pop(UpperCAmelCase ) if "mask" in key: continue elif "qkv" in key: __magic_name__ : Union[str, Any] = key.split('''.''' ) __magic_name__ : Any = int(key_split[1] ) __magic_name__ : Optional[Any] = int(key_split[3] ) __magic_name__ : Optional[int] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __magic_name__ : str = val[:dim, :] __magic_name__ : Optional[Any] = val[dim : dim * 2, :] __magic_name__ : Any = val[-dim:, :] else: __magic_name__ : Any = val[:dim] __magic_name__ : List[Any] = val[ dim : dim * 2 ] __magic_name__ : int = val[-dim:] else: __magic_name__ : str = val return orig_state_dict def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->Any: """simple docstring""" __magic_name__ : Tuple = timm.create_model(UpperCAmelCase, pretrained=UpperCAmelCase ) timm_model.eval() __magic_name__ : Dict = get_swinva_config(UpperCAmelCase ) __magic_name__ : int = SwinvaForImageClassification(UpperCAmelCase ) model.eval() __magic_name__ : Optional[Any] = convert_state_dict(timm_model.state_dict(), UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) __magic_name__ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __magic_name__ : Optional[Any] = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''', '''-''' ) ) ) __magic_name__ : List[str] = Image.open(requests.get(UpperCAmelCase, stream=UpperCAmelCase ).raw ) __magic_name__ : Optional[Any] = image_processor(images=UpperCAmelCase, return_tensors='''pt''' ) __magic_name__ : Optional[Any] = timm_model(inputs['''pixel_values'''] ) __magic_name__ : Tuple = model(**UpperCAmelCase ).logits assert torch.allclose(UpperCAmelCase, UpperCAmelCase, atol=1E-3 ) print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase ) model.push_to_hub( repo_path_or_name=Path(UpperCAmelCase, UpperCAmelCase ), organization='''nandwalritik''', commit_message='''Add model''', ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowercase_ = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
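`convert_state_dict` above slices timm's fused qkv projection into separate query/key/value tensors using `all_head_size`. A minimal sketch of that split with a toy dimension standing in for the real head size:

import torch

dim = 4  # stands in for all_head_size
qkv_weight = torch.randn(3 * dim, dim)  # fused (3*dim, dim) projection
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)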
336
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class A__ ( unittest.TestCase ): def lowercase ( self ) -> Dict: """simple docstring""" __magic_name__ : List[Any] = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6}, } } __magic_name__ : int = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 128, '''task_specific_params.summarization.min_length''': 12, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 142, '''task_specific_params.summarization_cnn.min_length''': 56, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 62, '''task_specific_params.summarization_xsum.min_length''': 11, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(lowerCamelCase ) , lowerCamelCase ) def lowercase ( self ) -> Tuple: """simple docstring""" __magic_name__ : Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(lowerCamelCase ) , x.transpose() ) ) __magic_name__ : Union[str, Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowercase ( self ) -> Optional[int]: """simple docstring""" __magic_name__ : Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ : List[str] = torch.tensor(lowerCamelCase ) self.assertTrue(np.allclose(transpose(lowerCamelCase ) , transpose(lowerCamelCase ).numpy() ) ) __magic_name__ : int = np.random.randn(3 , 4 , 5 ) __magic_name__ : Union[str, Any] = torch.tensor(lowerCamelCase ) self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , transpose(lowerCamelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowercase ( self ) -> Tuple: """simple docstring""" __magic_name__ : Dict = np.random.randn(3 , 4 ) __magic_name__ : Any = tf.constant(lowerCamelCase ) self.assertTrue(np.allclose(transpose(lowerCamelCase ) , transpose(lowerCamelCase ).numpy() ) ) __magic_name__ : str = np.random.randn(3 , 4 , 5 ) __magic_name__ : Optional[int] = tf.constant(lowerCamelCase ) self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , transpose(lowerCamelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowercase ( self ) -> int: """simple docstring""" __magic_name__ : Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ : Optional[Any] = jnp.array(lowerCamelCase ) self.assertTrue(np.allclose(transpose(lowerCamelCase ) , np.asarray(transpose(lowerCamelCase ) ) ) ) __magic_name__ : int = np.random.randn(3 , 4 , 5 ) __magic_name__ : Tuple = jnp.array(lowerCamelCase ) 
self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase , axes=(1, 2, 0) ) ) ) ) def lowercase ( self ) -> Optional[Any]: """simple docstring""" __magic_name__ : Dict = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , np.reshape(lowerCamelCase , (4, 3) ) ) ) __magic_name__ : Optional[int] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , np.reshape(lowerCamelCase , (12, 5) ) ) ) @require_torch def lowercase ( self ) -> int: """simple docstring""" __magic_name__ : Tuple = np.random.randn(3 , 4 ) __magic_name__ : List[Any] = torch.tensor(lowerCamelCase ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , reshape(lowerCamelCase , (4, 3) ).numpy() ) ) __magic_name__ : List[str] = np.random.randn(3 , 4 , 5 ) __magic_name__ : Tuple = torch.tensor(lowerCamelCase ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , reshape(lowerCamelCase , (12, 5) ).numpy() ) ) @require_tf def lowercase ( self ) -> Union[str, Any]: """simple docstring""" __magic_name__ : Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ : List[str] = tf.constant(lowerCamelCase ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , reshape(lowerCamelCase , (4, 3) ).numpy() ) ) __magic_name__ : Optional[Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ : str = tf.constant(lowerCamelCase ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , reshape(lowerCamelCase , (12, 5) ).numpy() ) ) @require_flax def lowercase ( self ) -> Tuple: """simple docstring""" __magic_name__ : Dict = np.random.randn(3 , 4 ) __magic_name__ : Optional[Any] = jnp.array(lowerCamelCase ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , np.asarray(reshape(lowerCamelCase , (4, 3) ) ) ) ) __magic_name__ : Union[str, Any] = np.random.randn(3 , 4 , 5 ) __magic_name__ : List[Any] = jnp.array(lowerCamelCase ) self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , np.asarray(reshape(lowerCamelCase , (12, 5) ) ) ) ) def lowercase ( self ) -> Dict: """simple docstring""" __magic_name__ : Optional[Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , np.squeeze(lowerCamelCase ) ) ) __magic_name__ : int = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , np.squeeze(lowerCamelCase , axis=2 ) ) ) @require_torch def lowercase ( self ) -> List[Any]: """simple docstring""" __magic_name__ : Any = np.random.randn(1 , 3 , 4 ) __magic_name__ : List[str] = torch.tensor(lowerCamelCase ) self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , squeeze(lowerCamelCase ).numpy() ) ) __magic_name__ : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ : Tuple = torch.tensor(lowerCamelCase ) self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , squeeze(lowerCamelCase , axis=2 ).numpy() ) ) @require_tf def lowercase ( self ) -> str: """simple docstring""" __magic_name__ : Optional[int] = np.random.randn(1 , 3 , 4 ) __magic_name__ : Any = tf.constant(lowerCamelCase ) self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , squeeze(lowerCamelCase ).numpy() ) ) __magic_name__ : int = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ : str = tf.constant(lowerCamelCase ) self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , squeeze(lowerCamelCase , axis=2 ).numpy() ) ) @require_flax def lowercase ( self ) -> List[Any]: """simple docstring""" __magic_name__ : str = 
np.random.randn(1 , 3 , 4 ) __magic_name__ : List[str] = jnp.array(lowerCamelCase ) self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , np.asarray(squeeze(lowerCamelCase ) ) ) ) __magic_name__ : Optional[int] = np.random.randn(1 , 4 , 1 , 5 ) __magic_name__ : Optional[int] = jnp.array(lowerCamelCase ) self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , np.asarray(squeeze(lowerCamelCase , axis=2 ) ) ) ) def lowercase ( self ) -> Optional[int]: """simple docstring""" __magic_name__ : Tuple = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , np.expand_dims(lowerCamelCase , axis=1 ) ) ) @require_torch def lowercase ( self ) -> List[Any]: """simple docstring""" __magic_name__ : Union[str, Any] = np.random.randn(3 , 4 ) __magic_name__ : str = torch.tensor(lowerCamelCase ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , expand_dims(lowerCamelCase , axis=1 ).numpy() ) ) @require_tf def lowercase ( self ) -> Any: """simple docstring""" __magic_name__ : List[str] = np.random.randn(3 , 4 ) __magic_name__ : Union[str, Any] = tf.constant(lowerCamelCase ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , expand_dims(lowerCamelCase , axis=1 ).numpy() ) ) @require_flax def lowercase ( self ) -> Optional[Any]: """simple docstring""" __magic_name__ : List[Any] = np.random.randn(3 , 4 ) __magic_name__ : int = jnp.array(lowerCamelCase ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , np.asarray(expand_dims(lowerCamelCase , axis=1 ) ) ) )
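Every test above follows one pattern: call the framework-agnostic helper on a plain numpy array and on a framework tensor, then compare results with `np.allclose`. A numpy-only instance of that pattern (assuming `transformers` is installed; no torch/tf/jax needed):

import numpy as np
from transformers.utils import squeeze, transpose

x = np.random.randn(1, 3, 4)
assert np.allclose(transpose(x), x.transpose())  # helper dispatches on input type
assert squeeze(x).shape == (3, 4)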
336
1
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class lowercase ( unittest.TestCase ): def __init__( self : Dict , _lowercase : Tuple , _lowercase : Optional[Any]=2 , _lowercase : int=56 , _lowercase : List[str]=True , _lowercase : Tuple=True , _lowercase : Optional[Any]=True , _lowercase : Optional[Any]=True , _lowercase : List[str]=99 , _lowercase : Dict=32 , _lowercase : Dict=2 , _lowercase : str=2 , _lowercase : Union[str, Any]=7 , _lowercase : Union[str, Any]="gelu_new" , _lowercase : List[Any]=0.1 , _lowercase : str=0.1 , _lowercase : Dict=5_12 , _lowercase : List[Any]=16 , _lowercase : Optional[int]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[int]=4 , _lowercase : str="block_sparse" , _lowercase : str=True , _lowercase : List[str]=False , _lowercase : Any=2 , _lowercase : Tuple=3 , ): SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_length SCREAMING_SNAKE_CASE__ : Tuple = is_training SCREAMING_SNAKE_CASE__ : Optional[Any] = use_attention_mask SCREAMING_SNAKE_CASE__ : int = use_token_type_ids SCREAMING_SNAKE_CASE__ : Dict = use_labels SCREAMING_SNAKE_CASE__ : List[str] = vocab_size SCREAMING_SNAKE_CASE__ : Any = hidden_size SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_embeddings SCREAMING_SNAKE_CASE__ : Tuple = attention_type SCREAMING_SNAKE_CASE__ : List[Any] = use_bias SCREAMING_SNAKE_CASE__ : int = block_size SCREAMING_SNAKE_CASE__ : Dict = num_random_blocks def lowercase__ ( self : List[str] ): SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : List[Any] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def lowercase__ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask, } return config, inputs_dict @require_flax class lowercase ( _UpperCAmelCase , unittest.TestCase ): lowerCamelCase : str = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) lowerCamelCase : int = False lowerCamelCase : Optional[int] = False def lowercase__ ( self : Tuple ): SCREAMING_SNAKE_CASE__ : Tuple = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Optional[int] ): super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Tuple ): super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Optional[int] ): super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Tuple ): super().test_hidden_states_output() @slow def lowercase__ ( self : Tuple ): for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple = model_class_name.from_pretrained('''google/bigbird-roberta-base''' ) self.assertIsNotNone(_lowercase ) def lowercase__ ( self : Union[str, Any] ): if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase ) @jax.jit def model_jitted(_lowercase : Optional[int] , _lowercase : List[Any]=None , **_lowercase : Tuple ): return model(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase ) with self.subTest('''JIT Enabled''' ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_jitted(**_lowercase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE__ : Optional[Any] = model_jitted(**_lowercase ).to_tuple() self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for jitted_output, output in zip(_lowercase , _lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase__ ( self : List[Any] , _lowercase : Dict , _lowercase : 
Optional[Any] , _lowercase : str , _lowercase : Tuple=1E-5 , _lowercase : Dict="outputs" , _lowercase : Optional[Any]=None ): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith('''outputs.attentions''' ): return else: super().check_pt_flax_outputs(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
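The JIT test above compiles the model call with `jax.jit` and checks that jitted and eager outputs shape-match. The same check reduced to a plain function (a sketch, not the Flax model itself):

import jax
import jax.numpy as jnp

def model_like(x):
    return jnp.tanh(x) * 2.0

jitted = jax.jit(model_like)
x = jnp.ones((2, 3))
assert jitted(x).shape == model_like(x).shape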
35
"""Character-level tokenization class for MGP-STR."""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level: every character of the input becomes one token
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
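The tokenizer above is purely character-level: `_tokenize` splits text into single characters, and unknown characters map to the `[GO]` unk token. A toy round trip with an invented four-entry vocab (not the real mgp-str vocab.json):

vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
decoder = {v: k for k, v in vocab.items()}

def encode(text):
    # unknown characters fall back to the unk id, mirroring _convert_token_to_id
    return [vocab.get(ch, vocab["[GO]"]) for ch in text]

ids = encode("abcx")
assert [decoder[i] for i in ids] == ["a", "b", "c", "[GO]"]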
48
0
'''simple docstring''' import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): A_ : Optional[Any] = yaml.safe_load( "\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n" ) A_ : Optional[int] = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } A_ : List[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Optional[int] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Any = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } A_ : Optional[Any] = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Dict = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) A_ : List[str] = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : List[str] = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) A_ : Optional[Any] = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : str = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' A_ : List[str] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' A_ : Dict = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' A_ : Union[str, Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' A_ : Any = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' A_ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' A_ : Tuple = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' A_ : str = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' A_ : Any = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : str = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' A_ : Optional[Any] = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' A_ : List[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. 
Skipping further validation for this README.''' A_ : Tuple = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Union[str, Any] = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' A_ : Union[str, Any] = '''''' A_ : Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' A_ : int = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' A_ : Optional[Any] = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( """readme_md, expected_dict""" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' assert ReadMe.from_string(_lowercase , _lowercase ).to_dict() == expected_dict @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> Tuple: '''simple docstring''' with pytest.raises(_lowercase , match=re.escape(expected_error.format(path="""root""" ) ) ): snake_case__ : Optional[Any] = ReadMe.from_string(_lowercase , _lowercase ) readme.validate() @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : List[Any] ) -> Any: '''simple docstring''' with pytest.raises(_lowercase , match=re.escape(expected_error.format(path="""root""" ) ) ): ReadMe.from_string(_lowercase , _lowercase ) @pytest.mark.parametrize( """readme_md,""" , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> str: '''simple docstring''' ReadMe.from_string(_lowercase , _lowercase , suppress_parsing_errors=_lowercase ) @pytest.mark.parametrize( """readme_md, expected_dict""" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def UpperCamelCase__ ( 
__magic_name__ : str , __magic_name__ : List[str] ) -> List[Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ : List[Any] = Path(_lowercase ) / 'README.md' with open(_lowercase , """w+""" ) as readme_file: readme_file.write(_lowercase ) snake_case__ : List[Any] = ReadMe.from_readme(_lowercase , _lowercase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : str ) -> Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ : str = Path(_lowercase ) / 'README.md' with open(_lowercase , """w+""" ) as readme_file: readme_file.write(_lowercase ) snake_case__ : Optional[Any] = expected_error.format(path=_lowercase ) with pytest.raises(_lowercase , match=re.escape(_lowercase ) ): snake_case__ : int = ReadMe.from_readme(_lowercase , _lowercase ) readme.validate() @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Any ) -> str: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ : Optional[Any] = Path(_lowercase ) / 'README.md' with open(_lowercase , """w+""" ) as readme_file: readme_file.write(_lowercase ) snake_case__ : Optional[Any] = expected_error.format(path=_lowercase ) with pytest.raises(_lowercase , match=re.escape(_lowercase ) ): ReadMe.from_readme(_lowercase , _lowercase ) @pytest.mark.parametrize( """readme_md,""" , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def UpperCamelCase__ ( __magic_name__ : int ) -> str: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ : Optional[Any] = Path(_lowercase ) / 'README.md' with open(_lowercase , """w+""" ) as readme_file: readme_file.write(_lowercase ) ReadMe.from_readme(_lowercase , _lowercase , suppress_parsing_errors=_lowercase )
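The error-path tests above share one idiom: format the expected message with the README path, then demand a literal match via `pytest.raises(..., match=re.escape(...))`. A standalone sketch of that idiom with a dummy validator in place of `ReadMe.validate`:

import re

import pytest

def validate(path):
    raise ValueError(f"The following issues were found for the README at `{path}`: ...")

# re.escape keeps the backticks and dots literal inside the regex match
with pytest.raises(ValueError, match=re.escape("README at `root`")):
    validate("root")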
704
class Node:
    """A binary-search-tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # note: falsy values (0, None) in self.val are overwritten rather than inserted
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
            else:
                self.val = val
        else:
            self.val = val


def inorder(root, res):
    """In-order traversal appends values to res in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort arr by inserting into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
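One property of the BST above worth noting: inserting a value equal to an existing node just reassigns `node.val`, so duplicates collapse and `tree_sort` de-duplicates its input. Using the `tree_sort` defined above:

assert tree_sort([3, 1, 3, 2]) == [1, 2, 3]  # the duplicate 3 is dropped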
419
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase__ : Union[str, Any] =logging.get_logger(__name__) class __lowercase (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCAmelCase = ["""pixel_values"""] def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): """simple docstring""" super().__init__(**lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = size if size is not None else {'shortest_edge': 3_8_4} SCREAMING_SNAKE_CASE_ : List[str] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = do_resize SCREAMING_SNAKE_CASE_ : Union[str, Any] = size # Default value set here for backwards compatibility where the value in config is None SCREAMING_SNAKE_CASE_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 SCREAMING_SNAKE_CASE_ : Dict = resample SCREAMING_SNAKE_CASE_ : int = do_rescale SCREAMING_SNAKE_CASE_ : Union[str, Any] = rescale_factor SCREAMING_SNAKE_CASE_ : List[Any] = do_normalize SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) SCREAMING_SNAKE_CASE_ : List[Any] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct SCREAMING_SNAKE_CASE_ : Tuple = int(shortest_edge / crop_pct ) SCREAMING_SNAKE_CASE_ : List[str] = get_resize_output_image_size(lowerCAmelCase__ , size=lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=lowerCAmelCase__ , size=(shortest_edge, shortest_edge) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( lowerCAmelCase__ , size=(shortest_edge, shortest_edge) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): """simple docstring""" return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): """simple docstring""" return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ : Any = crop_pct if crop_pct is not None else self.crop_pct SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ : List[Any] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = make_list_of_images(lowerCAmelCase__ ) if not valid_images(lowerCAmelCase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_ : Optional[int] = [to_numpy_array(lowerCAmelCase__ ) for image in images] if do_resize: SCREAMING_SNAKE_CASE_ : List[Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , crop_pct=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE_ : int = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_ : Optional[int] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images] SCREAMING_SNAKE_CASE_ : str = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images] SCREAMING_SNAKE_CASE_ : int = {'pixel_values': images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
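For target sizes below 384, `resize` above first scales the shortest edge to `shortest_edge / crop_pct` and then center-crops back to `shortest_edge`; at 384 or larger it warps directly with no crop. The geometry of the small-size branch, with illustrative numbers:

shortest_edge, crop_pct = 256, 224 / 256
resize_target = int(shortest_edge / crop_pct)
assert resize_target == 292  # scale short side to 292, then center-crop to 256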
101
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DebertaTokenizer UpperCAmelCase__ = True UpperCAmelCase__ = DebertaTokenizerFast def snake_case__ ( self : List[str] ) ->List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase : Optional[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]", ] _UpperCamelCase : str = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) _UpperCamelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _UpperCamelCase : Union[str, Any] = {"unk_token": "[UNK]"} _UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase__ ) ) def snake_case__ ( self : Dict , **lowercase__ : str ) ->List[str]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def snake_case__ ( self : Tuple , lowercase__ : Tuple ) ->Any: '''simple docstring''' _UpperCamelCase : List[Any] = "lower newer" _UpperCamelCase : Optional[Any] = "lower newer" return input_text, output_text def snake_case__ ( self : Optional[int] ) ->Dict: '''simple docstring''' _UpperCamelCase : Dict = self.get_tokenizer() _UpperCamelCase : List[str] = "lower newer" _UpperCamelCase : List[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] _UpperCamelCase : Dict = tokenizer.tokenize(lowercase__ ) self.assertListEqual(lowercase__ , lowercase__ ) _UpperCamelCase : Optional[Any] = tokens + [tokenizer.unk_token] _UpperCamelCase : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ ) def snake_case__ ( self : Dict ) ->Tuple: '''simple docstring''' _UpperCamelCase : Optional[int] = self.get_tokenizer() _UpperCamelCase : Optional[int] = tokenizer("Hello" , "World" ) _UpperCamelCase : Optional[int] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["token_type_ids"] , lowercase__ ) @slow def snake_case__ ( self : Any ) ->Union[str, Any]: '''simple docstring''' _UpperCamelCase : Optional[int] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" ) _UpperCamelCase : int = tokenizer.encode("sequence builders" , add_special_tokens=lowercase__ ) _UpperCamelCase : Any = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase__ ) _UpperCamelCase : Any = tokenizer.encode( "sequence builders" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) _UpperCamelCase : Dict = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) _UpperCamelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowercase__ ) _UpperCamelCase : Dict = 
tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def snake_case__ ( self : Dict ) ->Optional[Any]: '''simple docstring''' _UpperCamelCase : str = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: _UpperCamelCase : int = tokenizer_class.from_pretrained("microsoft/deberta-base" ) _UpperCamelCase : List[str] = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] _UpperCamelCase : Optional[int] = tokenizer(lowercase__ , padding=lowercase__ ) _UpperCamelCase : Union[str, Any] = [tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) for seq in encoding["input_ids"]] # fmt: off _UpperCamelCase : List[Any] = { "input_ids": [ [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2] ], "token_type_ids": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on _UpperCamelCase : Any = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] self.assertDictEqual(encoding.data , lowercase__ ) for expected, decoded in zip(lowercase__ , lowercase__ ): self.assertEqual(lowercase__ , lowercase__ )
435
0
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = tempfile.mkdtemp() UpperCamelCase = BlipImageProcessor() UpperCamelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' ) UpperCamelCase = BlipProcessor(_lowerCAmelCase , _lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCAmelCase ( self , **lowerCamelCase__ ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).tokenizer def UpperCAmelCase ( self , **lowerCamelCase__ ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ).image_processor def UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] UpperCamelCase = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) UpperCamelCase = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 ) UpperCamelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = image_processor(_lowerCAmelCase , return_tensors='''np''' ) UpperCamelCase = processor(images=_lowerCAmelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) UpperCamelCase = '''lower newer''' UpperCamelCase = processor(text=_lowerCAmelCase ) UpperCamelCase = tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = 
BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) UpperCamelCase = '''lower newer''' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase = processor.batch_decode(_lowerCAmelCase ) UpperCamelCase = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.get_image_processor() UpperCamelCase = self.get_tokenizer() UpperCamelCase = BlipProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) UpperCamelCase = '''lower newer''' UpperCamelCase = self.prepare_image_inputs() UpperCamelCase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
709
"""Convert an infix expression to prefix notation using an operator stack."""


def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on the reversed expression, then reverse the postfix result
    return (infix_2_postfix("".join(reversed_infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
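def _demo() -> None:
    # Minimal usage sketch (my addition, not part of the original module).
    # infix_2_prefix prints its conversion table as a side effect and returns
    # the prefix string: the postfix form of the reversed input "c^b+a" is
    # "cb^a+", which reversed gives "+a^bc".
    assert infix_2_prefix("a+b^c") == "+a^bc"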
350
0
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
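# Hypothetical usage sketch (my addition, not part of the original module; the
# relative import above means this file normally runs inside the transformers
# package, with scikit-learn installed):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     metrics = acc_and_f1(preds, labels)
#     # 3 of 4 predictions match, so metrics["acc"] == 0.75; with tp=2, fp=1,
#     # fn=0 the binary F1 is 0.8, and "acc_and_f1" is their mean, 0.775.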
559
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution x = 0, y = 0 (still a consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
            return (x, y)
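# Usage sketch (my addition, not part of the original module).
# For x + 2y = 7 and 3x - y = 0: determinant = 1*(-1) - 3*2 = -7,
# determinant_x = 7*(-1) - 0*2 = -7 and determinant_y = 1*0 - 3*7 = -21,
# so x = 1.0 and y = 3.0.
assert cramers_rule_2x2([1, 2, 7], [3, -1, 0]) == (1.0, 3.0)
# When both right-hand sides are zero the solver returns the trivial solution.
assert cramers_rule_2x2([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)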
559
1
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in the previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # First hidden layer has 4 nodes, second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Second hidden layer has 3 nodes, output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network; initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # Layer connecting the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # Layer connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # Layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # Expects the sigmoid activation s(x) and returns s(x) * (1 - s(x)).
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see the loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
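# Numerical sanity check (my addition, not part of the original module): for
# the logistic sigmoid s, the derivative satisfies s'(x) = s(x) * (1 - s(x)).
# Note that sigmoid_derivative expects the *activation* s(x), not x itself.
def _check_sigmoid_derivative() -> None:
    xs = numpy.linspace(-3.0, 3.0, 7)
    activations = sigmoid(xs)
    central_difference = (sigmoid(xs + 1e-6) - sigmoid(xs - 1e-6)) / 2e-6
    assert numpy.allclose(sigmoid_derivative(activations), central_difference, atol=1e-6)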
306
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that appends each vertex after its descendants (finish-time order)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search collecting every unvisited vertex reachable from vert in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: order vertices by finish time, then sweep the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
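def _demo() -> None:
    # Usage sketch (my addition, not part of the original module). In
    # test_graph_1 the vertices 0 -> 2 -> 1 -> 0 form a cycle, while 3 and 4
    # cannot be reached back from their successors, so they are singletons.
    components = strongly_connected_components(test_graph_1)
    assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3], [4]]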
306
1
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum over all contiguous subarrays of arr."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or start over at the current
        # element (or at the empty subarray, when that is allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
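# Usage sketch (my addition, not part of the original module): on an
# all-negative array the least negative element wins, unless the empty
# subarray (sum 0) is allowed.
assert max_subarray_sum([-4, -2, -7]) == -2
assert max_subarray_sum([-4, -2, -7], allow_empty_subarrays=True) == 0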
59
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( __a , __a ) -> List[Any]: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: int =tmp_path / "cache" lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features lowerCamelCase__: Optional[int] =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple docstring""" lowerCamelCase__: Any =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCAmelCase_ ( __a , __a , __a ) -> int: """simple docstring""" if issubclass(__a , __a ): lowerCamelCase__: List[Any] =parquet_path elif issubclass(__a , __a ): lowerCamelCase__: str =[parquet_path] lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict: """simple docstring""" assert isinstance(__a , __a ) for split in splits: lowerCamelCase__: Tuple =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Any: """simple 
docstring""" lowerCamelCase__: List[Any] =tmp_path / "cache" lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase__: Tuple =ParquetDatasetReader( {"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]: """simple docstring""" lowerCamelCase__: Tuple =tmp_path / "cache" lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: List[Any] =features.copy() if features else default_expected_features lowerCamelCase__: int =( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase__: Optional[Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]: """simple docstring""" if split: lowerCamelCase__: Any ={split: parquet_path} else: lowerCamelCase__: int ="train" lowerCamelCase__: Any ={"train": parquet_path, "test": parquet_path} lowerCamelCase__: str =tmp_path / "cache" lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCAmelCase_ ( __a , __a ) -> int: """simple docstring""" lowerCamelCase__: List[str] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: List[str] =pq.ParquetFile(tmp_path / "foo.parquet" ) lowerCamelCase__: List[str] =pf.read() assert dataset.data.table == output_table def lowerCAmelCase_ ( __a , __a ) -> List[str]: """simple docstring""" lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" ) lowerCamelCase__: Union[str, Any] ={"image": [image_path]} lowerCamelCase__: Optional[Any] =Features({"image": Image()} ) lowerCamelCase__: Optional[int] =Dataset.from_dict(__a , features=__a ) lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" ) assert writer.write() > 0 lowerCamelCase__: Dict =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features lowerCamelCase__: Optional[Any] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]: """simple docstring""" assert get_writer_batch_size(__a ) == expected
59
1
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
712
'''simple docstring''' import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case_ : Tuple = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class lowercase__ ( snake_case_, unittest.TestCase ): '''simple docstring''' _snake_case = PegasusTokenizer _snake_case = PegasusTokenizerFast _snake_case = True _snake_case = True def UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase = PegasusTokenizer(lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase ( self ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def UpperCAmelCase ( self , **lowerCamelCase__ ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' return ("This is a test", "This is a test") def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = '''</s>''' UpperCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(lowerCamelCase__ ) , 1_1_0_3 ) def UpperCAmelCase ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) UpperCamelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0] UpperCamelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word UpperCamelCase = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' UpperCamelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] UpperCamelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ ).input_ids[0] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == 
"<unk>" assert tokenizer.model_max_length == 1_0_2_4 UpperCamelCase = '''To ensure a smooth flow of bank resolutions.''' UpperCamelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] UpperCamelCase = tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ ).input_ids[0] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = ['''This is going to be way too long.''' * 1_5_0, '''short example'''] UpperCamelCase = ['''not super long but more than 5 tokens''', '''tiny'''] UpperCamelCase = self._large_tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' ) UpperCamelCase = self._large_tokenizer( text_target=lowerCamelCase__ , max_length=5 , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(lowerCamelCase__ ) == 2 # input_ids, attention_mask. @slow def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class lowercase__ ( snake_case_, unittest.TestCase ): '''simple docstring''' _snake_case = PegasusTokenizer _snake_case = PegasusTokenizerFast _snake_case = True _snake_case = True def UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase = PegasusTokenizer(lowerCamelCase__ , offset=0 , mask_token_sent=lowerCamelCase__ , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase ( self ): '''simple docstring''' return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def UpperCAmelCase ( self , **lowerCamelCase__ ): '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def UpperCAmelCase ( self , lowerCamelCase__ ): '''simple docstring''' return ("This is a test", "This is a test") def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname ) UpperCamelCase = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) UpperCamelCase = rust_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0] UpperCamelCase = py_tokenizer([raw_input_str] , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ).input_ids[0] self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @require_torch def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = ['''This is going to be way too long.''' * 1_0_0_0, '''short example'''] UpperCamelCase = ['''not super long but more than 5 tokens''', '''tiny'''] UpperCamelCase = self._large_tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' ) UpperCamelCase = self._large_tokenizer( text_target=lowerCamelCase__ , max_length=5 , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(lowerCamelCase__ ) == 2 # input_ids, attention_mask. def UpperCAmelCase ( self ): '''simple docstring''' UpperCamelCase = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) UpperCamelCase = self._large_tokenizer(lowerCamelCase__ ).input_ids self.assertListEqual( lowerCamelCase__ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
350
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]: UpperCamelCase__ : Any = [] # fmt: off # stem: rename_keys.append(('cls_token', 'vit.embeddings.cls_token')) rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings')) rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight')) rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias')) # backbone rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight')) rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight')) rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias')) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', 
f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias')) # transformer encoder for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight')) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias')) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight')) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias')) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias')) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ]) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCamelCase__ : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ]) # fmt: on return rename_keys def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Tuple: for i in range(config.num_hidden_layers): if base_model: UpperCamelCase__ : Union[str, Any] = '' else: UpperCamelCase__ : Optional[int] = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight') UpperCamelCase__ : Optional[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias') # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ : List[str] = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase__ : str = in_proj_bias[: config.hidden_size] UpperCamelCase__ : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : Dict = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowerCamelCase_) -> str: UpperCamelCase__ : Tuple = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(UpperCamelCase__ , UpperCamelCase__) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]: UpperCamelCase__ : int = dct.pop(UpperCamelCase__) UpperCamelCase__ : Union[str, Any] = val def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase__ : Optional[int] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__).raw) return im @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any: UpperCamelCase__ : Dict = BitConfig( global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=UpperCamelCase__ , ) UpperCamelCase__ : int = ViTHybridConfig(backbone_config=UpperCamelCase__ , image_size=384 , num_labels=1_000) UpperCamelCase__ : List[str] = False # load original model from timm UpperCamelCase__ : List[str] = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCamelCase__ : Any = timm_model.state_dict() if base_model: remove_classification_head_(UpperCamelCase__) UpperCamelCase__ : Optional[Any] = create_rename_keys(UpperCamelCase__ , UpperCamelCase__) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) UpperCamelCase__ : Union[str, Any] = 'huggingface/label-files' UpperCamelCase__ : Tuple = 'imagenet-1k-id2label.json' UpperCamelCase__ : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset') , 'r')) UpperCamelCase__ : List[Any] = {int(UpperCamelCase__): v for k, v in idalabel.items()} UpperCamelCase__ : Any = idalabel UpperCamelCase__ : Tuple = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": UpperCamelCase__ : Union[str, Any] = ViTHybridModel(UpperCamelCase__).eval() else: UpperCamelCase__ : int = ViTHybridForImageClassification(UpperCamelCase__).eval() model.load_state_dict(UpperCamelCase__) # create image processor UpperCamelCase__ : Any = create_transform(**resolve_data_config({} , model=UpperCamelCase__)) UpperCamelCase__ : Any = transform.transforms UpperCamelCase__ : Any = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } UpperCamelCase__ : Any = ViTHybridImageProcessor( do_resize=UpperCamelCase__ , size={'shortest_edge': timm_transforms[0].size} , 
resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCamelCase__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=UpperCamelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCamelCase__ : Optional[Any] = prepare_img() UpperCamelCase__ : Tuple = transform(UpperCamelCase__).unsqueeze(0) UpperCamelCase__ : int = processor(UpperCamelCase__ , return_tensors='pt').pixel_values # verify pixel values assert torch.allclose(UpperCamelCase__ , UpperCamelCase__) # verify logits with torch.no_grad(): UpperCamelCase__ : Dict = model(UpperCamelCase__) UpperCamelCase__ : str = outputs.logits print('Predicted class:' , logits.argmax(-1).item()) if base_model: UpperCamelCase__ : Dict = timm_model.forward_features(UpperCamelCase__) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(UpperCamelCase__ , outputs.pooler_output , atol=1e-3) else: UpperCamelCase__ : Tuple = timm_model(UpperCamelCase__) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1e-3) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(UpperCamelCase__).mkdir(exist_ok=UpperCamelCase__) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}') model.save_pretrained(UpperCamelCase__) print(f'Saving processor to {pytorch_dump_folder_path}') processor.save_pretrained(UpperCamelCase__) if push_to_hub: print(f'Pushing model and processor to the hub {vit_name}') model.push_to_hub(f'ybelkada/{vit_name}') processor.push_to_hub(f'ybelkada/{vit_name}') if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_r50_s16_384', type=str, help='Name of the hybrid ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) lowerCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
596
def solution() -> str:
    """Project Euler 48: return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
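# Cross-check (my addition, not part of the original snippet): only the last
# ten digits matter, so modular exponentiation gives the same answer without
# ever building the roughly 3000-digit exact powers. zfill keeps any leading
# zero that str(total)[-10:] would retain.
MOD = 10**10
assert solution() == str(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD).zfill(10)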
469
0
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "The output directory where the model will be written."} , ) __magic_name__: str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } , ) __magic_name__: str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } , ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : str = HfArgumentParser((ModelArguments,) ) (snake_case_) : Union[str, Any] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: snake_case_ : Optional[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: snake_case_ : Optional[Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: snake_case_ : int = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: snake_case_ : int = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed snake_case_ : List[str] = True snake_case_ : Dict = True snake_case_ : List[Any] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__a , decoder_config=__a , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens snake_case_ : Tuple = decoder_config.decoder_start_token_id snake_case_ : List[Any] = decoder_config.pad_token_id if decoder_start_token_id is None: snake_case_ : Optional[int] = decoder_config.bos_token_id if pad_token_id is None: snake_case_ : List[str] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work snake_case_ : str = decoder_config.eos_token_id snake_case_ : Optional[int] = decoder_start_token_id snake_case_ : List[str] = pad_token_id snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) snake_case_ : Optional[int] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) snake_case_ : Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
717
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): __magic_name__: Union[str, Any] = MODEL_FOR_MASKED_LM_MAPPING __magic_name__: Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING def UpperCAmelCase_ ( self : str ) -> str: """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" snake_case_ : int = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) snake_case_ : Optional[Any] = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 38015, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 25506, 'token_str': ' accuser'}, ] , ) snake_case_ : int = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1E-05, 'token': 38015, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1E-05, 'token': 25506, 'token_str': ' accuser', }, ] , ) snake_case_ : Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" snake_case_ : Tuple = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) snake_case_ : Tuple = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) snake_case_ : int = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) snake_case_ : Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2E-05, 'token': 2941, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'}, ] , ) snake_case_ : List[Any] = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=6 ) , [ [ { 
'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def UpperCAmelCase_ ( self : str ) -> Any: """simple docstring""" snake_case_ : Union[str, Any] = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() snake_case_ : Tuple = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_A , _A ) @slow @require_torch def UpperCAmelCase_ ( self : str ) -> List[Any]: """simple docstring""" snake_case_ : Optional[Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(_A ) @slow @require_tf def UpperCAmelCase_ ( self : Optional[int] ) -> int: """simple docstring""" snake_case_ : Dict = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(_A ) def UpperCAmelCase_ ( self : Dict , _A : List[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : List[Any] = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_A ) , [ {'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'}, ] , ) snake_case_ : List[Any] = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_A ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.2_5_1, 'token': 2201, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.2_1_4, 'token': 12790, 'token_str': ' Lyon', }, ] , ) snake_case_ : Tuple = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_A ) , [ {'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def UpperCAmelCase_ ( self : Optional[int] ) -> Any: """simple docstring""" snake_case_ : Optional[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) snake_case_ : Tuple = None snake_case_ : str = None self.run_pipeline_test(_A , [] ) @require_tf def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" snake_case_ : List[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) snake_case_ : List[str] = None snake_case_ : List[str] = None self.run_pipeline_test(_A , [] ) def UpperCAmelCase_ ( self : List[str] , _A : List[Any] , _A : Tuple , _A : Optional[int] ) -> Optional[Any]: """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : Optional[Any] = [ F"""This is another {tokenizer.mask_token} test""", 
] return fill_masker, examples def UpperCAmelCase_ ( self : Optional[Any] , _A : str , _A : List[Any] ) -> int: """simple docstring""" snake_case_ : Optional[int] = fill_masker.tokenizer snake_case_ : List[Any] = fill_masker.model snake_case_ : int = fill_masker( F"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Dict = fill_masker([F"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Optional[int] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( _A , [ [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], ] , ) with self.assertRaises(_A ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_A ): fill_masker('This is' ) self.run_test_top_k(_A , _A ) self.run_test_targets(_A , _A ) self.run_test_top_k_targets(_A , _A ) self.fill_mask_with_duplicate_targets_and_top_k(_A , _A ) self.fill_mask_with_multiple_masks(_A , _A ) def UpperCAmelCase_ ( self : Optional[Any] , _A : Any , _A : Optional[int] ) -> Any: """simple docstring""" snake_case_ : Dict = tokenizer.get_vocab() snake_case_ : List[Any] = sorted(vocab.keys() )[:2] # Pipeline argument snake_case_ : Dict = FillMaskPipeline(model=_A , tokenizer=_A , targets=_A ) snake_case_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : List[str] = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _A ) snake_case_ : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_A ) ) # Call argument snake_case_ : Dict = FillMaskPipeline(model=_A 
, tokenizer=_A ) snake_case_ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Any = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _A ) snake_case_ : Tuple = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_A ) ) # Score equivalence snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A ) snake_case_ : Any = [top_mask['token_str'] for top_mask in outputs] snake_case_ : Optional[Any] = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(_A ) == set(_A ): snake_case_ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=_A ) snake_case_ : Union[str, Any] = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) ) # Raises with invalid with self.assertRaises(_A ): snake_case_ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_A ): snake_case_ : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(_A ): snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets='' ) def UpperCAmelCase_ ( self : Tuple , _A : Any , _A : Optional[Any] ) -> Any: """simple docstring""" snake_case_ : str = FillMaskPipeline(model=_A , tokenizer=_A , top_k=2 ) snake_case_ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) snake_case_ : Any = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _A , [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ] , ) self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) ) def UpperCAmelCase_ ( self : Tuple , _A : Any , _A : Dict ) -> str: """simple docstring""" snake_case_ : str = tokenizer.get_vocab() snake_case_ : Tuple = FillMaskPipeline(model=_A , tokenizer=_A ) # top_k=2, ntargets=3 snake_case_ : str = sorted(vocab.keys() )[:3] snake_case_ : Union[str, Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_A ) # If we use the most probably targets, and filter differently, we should still # have the same results snake_case_ : Any = [el['token_str'] for el in sorted(_A , key=lambda _A : x["score"] , reverse=_A )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_A ).issubset(_A ): snake_case_ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_A ) # They should yield exactly the same result self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) ) def UpperCAmelCase_ ( self : str , _A : Dict , _A : Tuple ) -> Dict: """simple docstring""" snake_case_ : Tuple = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates snake_case_ : str = sorted(vocab.keys() )[:3] snake_case_ : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]] snake_case_ : str = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=_A , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_A ) , 3 ) def UpperCAmelCase_ ( self : List[str] , _A : str , _A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" snake_case_ : Union[str, Any] = FillMaskPipeline(model=_A , tokenizer=_A ) snake_case_ : List[str] = fill_masker( F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _A , [ [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], [ {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, {'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )}, ], ] , )
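A minimal usage sketch of the fill-mask pipeline exercised by the tests above; the checkpoint name is the same `distilroberta-base` used in the slow tests, the prompt is illustrative.

from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
for pred in unmasker("The largest city in France is <mask>."):
    print(pred["token_str"], pred["score"])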
534
0
def selection_sort(collection):
    """simple docstring"""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
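A quick sanity check for selection_sort above; the identifiers were reconstructed from the call sites, so treat the restored names as inferred.

assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []
assert selection_sort([5, -1, 5, 0]) == [-1, 0, 5, 5]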
43
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): lowercase__ = args.output + '''.pt''' lowercase__ = OrderedDict() with tf.device('''/CPU:0''' ): lowercase__ = tf.train.load_checkpoint(args.tf_model_dir ) lowercase__ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase__ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase__ = 8 lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/moe''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase__ = key_name[-9:-7] for i in range(16 ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase__ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/mlp''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p1/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/ln''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % 
player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/att''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase__ = state[:, 0, :, :] lowercase__ = state[:, 1, :, :] lowercase__ = state[:, 2, :, :] lowercase__ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/o/kernel''' ): lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase__ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/an''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase__ = '''model.%s.weight''' % nlayer lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) if key_name.startswith('''model/wte''' ): lowercase__ = '''lm_head.weight''' lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/wob''' ): lowercase__ = '''final_logits_bias''' lowercase__ = vnp.copy() # same in embedded lowercase__ = state.reshape((1, -1) ) lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": lowercase__ = '''model.last_project.weight''' lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": lowercase__ = '''model.last_project.bias''' lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = 
torch.tensor(SCREAMING_SNAKE_CASE ) torch.save(SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') lowerCAmelCase = parser.parse_args() convert_tf_gptsan_to_pt(args)
43
1
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __A = logging.get_logger(__name__) __A = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) lowerCAmelCase__ :Tuple = model lowerCAmelCase__ :Optional[Any] = kwargs.get('model_save_dir' , __UpperCAmelCase ) lowerCAmelCase__ :Tuple = kwargs.get('latest_model_name' , __UpperCAmelCase ) def __call__( self , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = {k: np.array(__UpperCAmelCase ) for k, v in kwargs.items()} return self.model.run(__UpperCAmelCase , __UpperCAmelCase ) @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) lowerCAmelCase__ :Optional[Any] = 'CPUExecutionProvider' return ort.InferenceSession(__UpperCAmelCase , providers=[provider] , sess_options=__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = file_name if file_name is not None else ONNX_WEIGHTS_NAME lowerCAmelCase__ :List[Any] = self.model_save_dir.joinpath(self.latest_model_name ) lowerCAmelCase__ :List[str] = Path(__UpperCAmelCase ).joinpath(__UpperCAmelCase ) try: shutil.copyfile(__UpperCAmelCase , __UpperCAmelCase ) except shutil.SameFileError: pass # copy external weights (for models >2GB) lowerCAmelCase__ :Union[str, Any] = self.model_save_dir.joinpath(__UpperCAmelCase ) if src_path.exists(): lowerCAmelCase__ :Optional[Any] = Path(__UpperCAmelCase ).joinpath(__UpperCAmelCase ) try: shutil.copyfile(__UpperCAmelCase , __UpperCAmelCase ) except shutil.SameFileError: pass def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase , ): '''simple docstring''' if os.path.isfile(__UpperCAmelCase ): logger.error(F"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) # saving model weights/files self._save_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) @classmethod def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(__UpperCAmelCase ): lowerCAmelCase__ :Union[str, Any] = OnnxRuntimeModel.load_model( os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , provider=__UpperCAmelCase , sess_options=__UpperCAmelCase ) 
lowerCAmelCase__ :str = Path(__UpperCAmelCase ) # load model from hub else: # download model lowerCAmelCase__ :Union[str, Any] = hf_hub_download( repo_id=__UpperCAmelCase , filename=__UpperCAmelCase , use_auth_token=__UpperCAmelCase , revision=__UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , ) lowerCAmelCase__ :List[str] = Path(__UpperCAmelCase ).parent lowerCAmelCase__ :Union[str, Any] = Path(__UpperCAmelCase ).name lowerCAmelCase__ :str = OnnxRuntimeModel.load_model(__UpperCAmelCase , provider=__UpperCAmelCase , sess_options=__UpperCAmelCase ) return cls(model=__UpperCAmelCase , **__UpperCAmelCase ) @classmethod def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :List[Any] = None if len(str(__UpperCAmelCase ).split('@' ) ) == 2: lowerCAmelCase__ :Tuple = model_id.split('@' ) return cls._from_pretrained( model_id=__UpperCAmelCase , revision=__UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , use_auth_token=__UpperCAmelCase , **__UpperCAmelCase , )
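The logger message in this sample identifies it as diffusers' OnnxRuntimeModel; a hedged loading sketch under that assumption, where the repo id and file name are illustrative placeholders, not real checkpoints.

from diffusers import OnnxRuntimeModel  # assumes the sample above is this class

# Placeholder repo id and weight file name, for illustration only.
model = OnnxRuntimeModel.from_pretrained("org/some-onnx-repo", file_name="model.onnx")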
717
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class _lowerCAmelCase ( a ): """simple docstring""" def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' if tokenize_kwargs is None: lowerCAmelCase__ :List[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) lowerCAmelCase__ :List[str] = truncation lowerCAmelCase__ :Union[str, Any] = tokenize_kwargs lowerCAmelCase__ :List[str] = {} if return_tensors is not None: lowerCAmelCase__ :List[str] = return_tensors return preprocess_params, {}, postprocess_params def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.framework lowerCAmelCase__ :Optional[Any] = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) return model_inputs def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.model(**__UpperCAmelCase ) return model_outputs def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
560
0
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler SCREAMING_SNAKE_CASE_: Tuple =16 SCREAMING_SNAKE_CASE_: Tuple =32 def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = AutoTokenizer.from_pretrained(snake_case_ ) UpperCAmelCase_ = load_dataset("glue" , "mrpc" ) def tokenize_function(snake_case_ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(snake_case_ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(snake_case_ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
UpperCAmelCase_ = DataLoader( tokenized_datasets["train"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) UpperCAmelCase_ = DataLoader( tokenized_datasets["validation"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ = config["lr"] UpperCAmelCase_ = int(config["num_epochs"] ) UpperCAmelCase_ = int(config["seed"] ) UpperCAmelCase_ = int(config["batch_size"] ) UpperCAmelCase_ = args.model_name_or_path set_seed(snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer UpperCAmelCase_ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: UpperCAmelCase_ = 1 UpperCAmelCase_ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: UpperCAmelCase_ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ = 0 # Now we train the model UpperCAmelCase_ = evaluate.load("glue" , "mrpc" ) UpperCAmelCase_ = 0 UpperCAmelCase_ = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): UpperCAmelCase_ = model(**snake_case_ ) UpperCAmelCase_ = outputs.loss UpperCAmelCase_ = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() UpperCAmelCase_ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ = model(**snake_case_ ) UpperCAmelCase_ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: UpperCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) UpperCAmelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , snake_case_ ) UpperCAmelCase_ = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: UpperCAmelCase_ = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=snake_case_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case_ , ) parser.add_argument( "--output_dir" , type=snake_case_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=snake_case_ , default=snake_case_ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=snake_case_ , default=3 , help="Number of train epochs." , ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
78
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def UpperCAmelCase_ ( self : Tuple ) -> int: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = 5 # Realm tok __lowercase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __lowercase = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) __lowercase = os.path.join(lowerCamelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __lowercase = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) def UpperCAmelCase_ ( self : List[str] ) -> RealmTokenizer: """simple docstring""" return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = RealmConfig(num_block_records=self.num_block_records ) return config def UpperCAmelCase_ ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def UpperCAmelCase_ ( self : int ) -> List[Any]: """simple docstring""" __lowercase = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=lowerCamelCase__ , ) return block_records def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = self.get_config() __lowercase = self.get_dummy_retriever() __lowercase = retriever.tokenizer __lowercase = np.array([0, 3] , dtype='''long''' ) __lowercase = tokenizer(['''Test question'''] ).input_ids __lowercase = tokenizer( ['''the fourth'''] , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ).input_ids __lowercase = config.reader_seq_len __lowercase , __lowercase , __lowercase , __lowercase = retriever( lowerCamelCase__ , lowerCamelCase__ , answer_ids=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors='''np''' ) self.assertEqual(len(lowerCamelCase__ ) , 2 ) self.assertEqual(len(lowerCamelCase__ ) , 2 ) 
self.assertEqual(len(lowerCamelCase__ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase = self.get_config() __lowercase = self.get_dummy_retriever() __lowercase = retriever.tokenizer __lowercase = np.array([0, 3, 5] , dtype='''long''' ) __lowercase = tokenizer(['''Test question'''] ).input_ids __lowercase = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ).input_ids __lowercase = config.reader_seq_len __lowercase , __lowercase , __lowercase , __lowercase = retriever( lowerCamelCase__ , lowerCamelCase__ , answer_ids=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors='''np''' ) self.assertEqual([False, True, True] , lowerCamelCase__ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowerCamelCase__ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowerCamelCase__ ) def UpperCAmelCase_ ( self : int ) -> str: """simple docstring""" __lowercase = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path __lowercase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: __lowercase = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) __lowercase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
332
0
'''simple docstring'''
from typing import Dict

from .base import GenericTensor, Pipeline


class lowercase(UpperCAmelCase_):
    """simple docstring"""

    def lowerCAmelCase__(self, UpperCamelCase_=None, UpperCamelCase_=None, UpperCamelCase_=None, **UpperCamelCase_):
        '''simple docstring'''
        if tokenize_kwargs is None:
            UpperCamelCase__ :Any = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'''
                )
            UpperCamelCase__ :Union[str, Any] = truncation
        UpperCamelCase__ :Union[str, Any] = tokenize_kwargs
        UpperCamelCase__ :Tuple = {}
        if return_tensors is not None:
            UpperCamelCase__ :Union[str, Any] = return_tensors
        return preprocess_params, {}, postprocess_params

    def lowerCAmelCase__(self, UpperCamelCase_, **UpperCamelCase_):
        '''simple docstring'''
        UpperCamelCase__ :Dict = self.framework
        UpperCamelCase__ :Optional[Any] = self.tokenizer(_snake_case, return_tensors=_snake_case, **_snake_case)
        return model_inputs

    def lowerCAmelCase__(self, UpperCamelCase_):
        '''simple docstring'''
        UpperCamelCase__ :Dict = self.model(**_snake_case)
        return model_outputs

    def lowerCAmelCase__(self, UpperCamelCase_, UpperCamelCase_=False):
        '''simple docstring'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *UpperCamelCase_, **UpperCamelCase_):
        '''simple docstring'''
        return super().__call__(*_snake_case, **_snake_case)
708
'''simple docstring''' import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def a ( __a ) -> Optional[int]: '''simple docstring''' random.seed(__a ) np.random.seed(__a ) torch.manual_seed(__a ) torch.cuda.manual_seed_all(__a ) # ^^ safe to call this function even if cuda is not available class lowercase : """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_ = 0.9999 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 0 , UpperCamelCase_ = False , UpperCamelCase_ = 1.0 , UpperCamelCase_ = 2 / 3 , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ): '''simple docstring''' if isinstance(UpperCamelCase_ , torch.nn.Module ): UpperCamelCase__ :Optional[Any] = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ , ) UpperCamelCase__ :Optional[int] = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility UpperCamelCase__ :str = True if kwargs.get('''max_value''' , UpperCamelCase_ ) is not None: UpperCamelCase__ :List[str] = '''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ ) UpperCamelCase__ :int = kwargs['''max_value'''] if kwargs.get('''min_value''' , UpperCamelCase_ ) is not None: UpperCamelCase__ :Union[str, Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.''' deprecate('''min_value''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ ) UpperCamelCase__ :Any = kwargs['''min_value'''] UpperCamelCase__ :Optional[int] = list(UpperCamelCase_ ) UpperCamelCase__ :Tuple = [p.clone().detach() for p in parameters] if kwargs.get('''device''' , UpperCamelCase_ ) is not None: UpperCamelCase__ :str = '''The `device` argument is deprecated. 
Please use `to` instead.''' deprecate('''device''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ ) self.to(device=kwargs['''device'''] ) UpperCamelCase__ :Union[str, Any] = None UpperCamelCase__ :List[Any] = decay UpperCamelCase__ :List[str] = min_decay UpperCamelCase__ :Optional[int] = update_after_step UpperCamelCase__ :int = use_ema_warmup UpperCamelCase__ :Any = inv_gamma UpperCamelCase__ :Union[str, Any] = power UpperCamelCase__ :Union[str, Any] = 0 UpperCamelCase__ :Dict = None # set in `step()` UpperCamelCase__ :List[Any] = model_cls UpperCamelCase__ :Optional[Any] = model_config @classmethod def lowerCAmelCase__ ( cls , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ :Tuple = model_cls.load_config(UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ ) UpperCamelCase__ :List[str] = model_cls.from_pretrained(UpperCamelCase_ ) UpperCamelCase__ :str = cls(model.parameters() , model_cls=UpperCamelCase_ , model_config=model.config ) ema_model.load_state_dict(UpperCamelCase_ ) return ema_model def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) UpperCamelCase__ :Optional[Any] = self.model_cls.from_config(self.model_config ) UpperCamelCase__ :Optional[int] = self.state_dict() state_dict.pop('''shadow_params''' , UpperCamelCase_ ) model.register_to_config(**UpperCamelCase_ ) self.copy_to(model.parameters() ) model.save_pretrained(UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :str = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: UpperCamelCase__ :Dict = 1 - (1 + step / self.inv_gamma) ** -self.power else: UpperCamelCase__ :int = (1 + step) / (10 + step) UpperCamelCase__ :Any = min(UpperCamelCase_ , self.decay ) # make sure decay is not smaller than min_decay UpperCamelCase__ :Dict = max(UpperCamelCase_ , self.min_decay ) return cur_decay_value @torch.no_grad() def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' if isinstance(UpperCamelCase_ , torch.nn.Module ): UpperCamelCase__ :Optional[Any] = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , UpperCamelCase_ , standard_warn=UpperCamelCase_ , ) UpperCamelCase__ :str = parameters.parameters() UpperCamelCase__ :Dict = list(UpperCamelCase_ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
UpperCamelCase__ :Any = self.get_decay(self.optimization_step ) UpperCamelCase__ :Tuple = decay UpperCamelCase__ :List[Any] = 1 - decay UpperCamelCase__ :Optional[Any] = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , UpperCamelCase_ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): UpperCamelCase__ :Dict = deepspeed.zero.GatheredParameters(UpperCamelCase_ , modifier_rank=UpperCamelCase_ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Tuple = list(UpperCamelCase_ ) for s_param, param in zip(self.shadow_params , UpperCamelCase_ ): param.data.copy_(s_param.to(param.device ).data ) def lowerCAmelCase__ ( self , UpperCamelCase_=None , UpperCamelCase_=None ): '''simple docstring''' UpperCamelCase__ :Tuple = [ p.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ ) if p.is_floating_point() else p.to(device=UpperCamelCase_ ) for p in self.shadow_params ] def lowerCAmelCase__ ( self ): '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :List[str] = [param.detach().cpu().clone() for param in parameters] def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in zip(self.temp_stored_params , UpperCamelCase_ ): param.data.copy_(c_param.data ) # Better memory-wise. 
UpperCamelCase__ :Optional[Any] = None def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :int = copy.deepcopy(UpperCamelCase_ ) UpperCamelCase__ :Dict = state_dict.get('''decay''' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) UpperCamelCase__ :Union[str, Any] = state_dict.get('''min_decay''' , self.min_decay ) if not isinstance(self.min_decay , UpperCamelCase_ ): raise ValueError('''Invalid min_decay''' ) UpperCamelCase__ :Union[str, Any] = state_dict.get('''optimization_step''' , self.optimization_step ) if not isinstance(self.optimization_step , UpperCamelCase_ ): raise ValueError('''Invalid optimization_step''' ) UpperCamelCase__ :List[Any] = state_dict.get('''update_after_step''' , self.update_after_step ) if not isinstance(self.update_after_step , UpperCamelCase_ ): raise ValueError('''Invalid update_after_step''' ) UpperCamelCase__ :List[str] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , UpperCamelCase_ ): raise ValueError('''Invalid use_ema_warmup''' ) UpperCamelCase__ :Optional[int] = state_dict.get('''inv_gamma''' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('''Invalid inv_gamma''' ) UpperCamelCase__ :str = state_dict.get('''power''' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('''Invalid power''' ) UpperCamelCase__ :Tuple = state_dict.get('''shadow_params''' , UpperCamelCase_ ) if shadow_params is not None: UpperCamelCase__ :Dict = shadow_params if not isinstance(self.shadow_params , UpperCamelCase_ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(UpperCamelCase_ , torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
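A hedged training-loop sketch, assuming the class above is diffusers' EMAModel (identifiers in the sample are mangled, but the deprecation strings name the class); the model and loop are dummies.

import torch
from diffusers.training_utils import EMAModel  # assumed home of the class above

model = torch.nn.Linear(4, 4)
ema = EMAModel(model.parameters(), decay=0.9999)

for _ in range(10):                   # dummy optimization steps
    ema.step(model.parameters())      # update shadow params after each optimizer step

ema.store(model.parameters())         # stash the training weights
ema.copy_to(model.parameters())       # evaluate with the EMA weights
ema.restore(model.parameters())       # put the training weights back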
280
0
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _A : Union[str, Any] = logging.get_logger(__name__) class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , A_ , A_ , A_ , **A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = feature_size SCREAMING_SNAKE_CASE__ = sampling_rate SCREAMING_SNAKE_CASE__ = padding_value SCREAMING_SNAKE_CASE__ = kwargs.pop('''padding_side''' , '''right''' ) SCREAMING_SNAKE_CASE__ = kwargs.pop('''return_attention_mask''' , A_ ) super().__init__(**A_ ) def lowercase_ ( self , A_ , A_ = True , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , ): '''simple docstring''' if isinstance(A_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): SCREAMING_SNAKE_CASE__ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' f''' to this method that includes {self.model_input_names[0]}, but you provided''' f''' {list(processed_features.keys() )}''' ) SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]] SCREAMING_SNAKE_CASE__ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(A_ ) == 0: if return_attention_mask: SCREAMING_SNAKE_CASE__ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch SCREAMING_SNAKE_CASE__ = required_input[0] if isinstance(A_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. SCREAMING_SNAKE_CASE__ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(A_ ): SCREAMING_SNAKE_CASE__ = required_input[index][0] if return_tensors is None: if is_tf_tensor(A_ ): SCREAMING_SNAKE_CASE__ = '''tf''' elif is_torch_tensor(A_ ): SCREAMING_SNAKE_CASE__ = '''pt''' elif isinstance(A_ , (int, float, list, tuple, np.ndarray) ): SCREAMING_SNAKE_CASE__ = '''np''' else: raise ValueError( f'''type of {first_element} unknown: {type(A_ )}. 
''' '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): SCREAMING_SNAKE_CASE__ = to_numpy(A_ ) else: SCREAMING_SNAKE_CASE__ = [to_numpy(A_ ) for v in value] # Convert padding_strategy in PaddingStrategy SCREAMING_SNAKE_CASE__ = self._get_padding_strategies(padding=A_ , max_length=A_ ) SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]] SCREAMING_SNAKE_CASE__ = len(A_ ) if not all(len(A_ ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) SCREAMING_SNAKE_CASE__ = [] for i in range(A_ ): SCREAMING_SNAKE_CASE__ = {k: v[i] for k, v in processed_features.items()} # truncation SCREAMING_SNAKE_CASE__ = self._truncate( A_ , max_length=A_ , pad_to_multiple_of=A_ , truncation=A_ , ) truncated_inputs.append(A_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length SCREAMING_SNAKE_CASE__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) SCREAMING_SNAKE_CASE__ = PaddingStrategy.MAX_LENGTH SCREAMING_SNAKE_CASE__ = {} for i in range(A_ ): # padding SCREAMING_SNAKE_CASE__ = self._pad( truncated_inputs[i] , max_length=A_ , padding_strategy=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , ) for key, value in outputs.items(): if key not in batch_outputs: SCREAMING_SNAKE_CASE__ = [] if value.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE__ = value.astype(np.floataa ) batch_outputs[key].append(A_ ) return BatchFeature(A_ , tensor_type=A_ ) def lowercase_ ( self , A_ , A_ = None , A_ = PaddingStrategy.DO_NOT_PAD , A_ = None , A_ = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: SCREAMING_SNAKE_CASE__ = len(A_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): SCREAMING_SNAKE_CASE__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of SCREAMING_SNAKE_CASE__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: SCREAMING_SNAKE_CASE__ = np.ones(len(A_ ) , dtype=np.intaa ) if needs_to_be_padded: SCREAMING_SNAKE_CASE__ = max_length - len(A_ ) if self.padding_side == "right": if return_attention_mask: SCREAMING_SNAKE_CASE__ = np.pad( processed_features['''attention_mask'''] , (0, difference) ) SCREAMING_SNAKE_CASE__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) SCREAMING_SNAKE_CASE__ = np.pad( A_ , A_ , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: SCREAMING_SNAKE_CASE__ = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) SCREAMING_SNAKE_CASE__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) SCREAMING_SNAKE_CASE__ = np.pad( A_ , A_ , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def lowercase_ ( self , A_ , A_ = None , A_ = None , A_ = None , ): '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) 
SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): SCREAMING_SNAKE_CASE__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of SCREAMING_SNAKE_CASE__ = len(A_ ) > max_length if needs_to_be_truncated: SCREAMING_SNAKE_CASE__ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: SCREAMING_SNAKE_CASE__ = processed_features['''attention_mask'''][:max_length] return processed_features def lowercase_ ( self , A_=False , A_=None ): '''simple docstring''' if padding is not False: if padding is True: SCREAMING_SNAKE_CASE__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(A_ , A_ ): SCREAMING_SNAKE_CASE__ = PaddingStrategy(A_ ) elif isinstance(A_ , A_ ): SCREAMING_SNAKE_CASE__ = padding else: SCREAMING_SNAKE_CASE__ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
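The mangled methods above correspond to SequenceFeatureExtractor.pad and its padding/truncation helpers; a small sketch through a concrete subclass, under that assumption, with illustrative values.

from transformers import Wav2Vec2FeatureExtractor  # concrete subclass, chosen for illustration

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
batch = extractor.pad(
    {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]},  # ragged batch of two sequences
    padding="longest",                                # pad to the longest item in the batch
    return_attention_mask=True,
    return_tensors="np",
)
print(batch["input_values"].shape, batch["attention_mask"].shape)  # (2, 3) and (2, 3)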
100
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available a :str = { "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :str = [ "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST", "ErnieForCausalLM", "ErnieForMaskedLM", "ErnieForMultipleChoice", "ErnieForNextSentencePrediction", "ErnieForPreTraining", "ErnieForQuestionAnswering", "ErnieForSequenceClassification", "ErnieForTokenClassification", "ErnieModel", "ErniePreTrainedModel", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys a :Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
680
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __magic_name__ = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['''SpeechEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['''FlaxSpeechEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
314
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __magic_name__ = get_tests_dir('''fixtures''') __magic_name__ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') __magic_name__ = get_tests_dir('''fixtures/dummy-config.json''') class a__ ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self :str ): lowercase = 0 def __UpperCAmelCase ( self :Tuple ): lowercase = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(lowercase__ , lowercase__ ) def __UpperCAmelCase ( self :Any ): lowercase = AutoFeatureExtractor.from_pretrained(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def __UpperCAmelCase ( self :int ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally lowercase = AutoFeatureExtractor.from_pretrained(lowercase__ ).to_dict() config_dict.pop('feature_extractor_type' ) lowercase = WavaVecaFeatureExtractor(**lowercase__ ) # save in new folder model_config.save_pretrained(lowercase__ ) config.save_pretrained(lowercase__ ) lowercase = AutoFeatureExtractor.from_pretrained(lowercase__ ) # make sure private variable is not incorrectly saved lowercase = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(lowercase__ , lowercase__ ) def __UpperCAmelCase ( self :List[Any] ): lowercase = AutoFeatureExtractor.from_pretrained(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def __UpperCAmelCase ( self :List[Any] ): with self.assertRaisesRegex( lowercase__ , 'bert-base is not a local folder and is not a valid model identifier' ): lowercase = AutoFeatureExtractor.from_pretrained('bert-base' ) def __UpperCAmelCase ( self :List[str] ): with self.assertRaisesRegex( lowercase__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): lowercase = AutoFeatureExtractor.from_pretrained(lowercase__ , revision='aaaaaa' ) def __UpperCAmelCase ( self :Any ): with self.assertRaisesRegex( lowercase__ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ): lowercase = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' ) def __UpperCAmelCase ( self :Tuple ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase__ ): lowercase = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowercase__ ): lowercase = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase__ ) lowercase = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase__ ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowercase__ ) lowercase = AutoFeatureExtractor.from_pretrained(lowercase__ , trust_remote_code=lowercase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) def __UpperCAmelCase ( self :Optional[int] ): try: AutoConfig.register('custom' , lowercase__ ) AutoFeatureExtractor.register(lowercase__ , lowercase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase__ ): AutoFeatureExtractor.register(lowercase__ , lowercase__ ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase = CustomFeatureExtractor.from_pretrained(lowercase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowercase__ ) lowercase = AutoFeatureExtractor.from_pretrained(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __UpperCAmelCase ( self :Any ): class a__ ( _snake_case ): """simple docstring""" A__ : Union[str, Any] = True try: AutoConfig.register('custom' , lowercase__ ) AutoFeatureExtractor.register(lowercase__ , lowercase__ ) # If remote code is not set, the default is to use local lowercase = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. lowercase = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase__ ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub lowercase = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase__ ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(not hasattr(lowercase__ , 'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
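A condensed sketch of the registration flow those tests exercise, using the CustomConfig/CustomFeatureExtractor fixtures imported at the top of the file; outside this test environment the two classes would be your own subclasses.
from transformers import AutoConfig, AutoFeatureExtractor

AutoConfig.register("custom", CustomConfig)                           # map model_type -> config class
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)   # map config -> feature extractor
# From here on the auto-API resolves the custom pair like any built-in one,
# e.g. AutoFeatureExtractor.from_pretrained(<dir written by save_pretrained>).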
314
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowercase_ ( unittest.TestCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=3_0 , UpperCamelCase__=4_0_0 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=True , UpperCamelCase__=1 / 2_5_5 , UpperCamelCase__=True , ) -> str: """simple docstring""" UpperCAmelCase_ = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = min_resolution UpperCAmelCase_ = max_resolution UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean UpperCAmelCase_ = image_std UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_pad def lowerCamelCase_ ( self ) -> Optional[int]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> str: """simple docstring""" if not batched: UpperCAmelCase_ = image_inputs[0] if isinstance(UpperCamelCase__ , Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ = image.size else: UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2] if w < h: UpperCAmelCase_ = int(self.size["shortest_edge"] * h / w ) UpperCAmelCase_ = self.size["shortest_edge"] elif w > h: UpperCAmelCase_ = self.size["shortest_edge"] UpperCAmelCase_ = int(self.size["shortest_edge"] * w / h ) else: UpperCAmelCase_ = self.size["shortest_edge"] UpperCAmelCase_ = self.size["shortest_edge"] else: UpperCAmelCase_ = [] for image in image_inputs: UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase_ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[0] )[0] UpperCAmelCase_ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase_ ( _A , unittest.TestCase ): a_ = ConditionalDetrImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self ) -> Dict: """simple docstring""" UpperCAmelCase_ = ConditionalDetrImageProcessingTester(self ) @property def lowerCamelCase_ ( self ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self ) -> Any: """simple docstring""" UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "size" ) 
) def lowerCamelCase_ ( self ) -> int: """simple docstring""" UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , UpperCamelCase__ ) UpperCAmelCase_ = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=UpperCamelCase__ ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , UpperCamelCase__ ) def lowerCamelCase_ ( self ) -> str: """simple docstring""" pass def lowerCamelCase_ ( self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase_ ( self ) -> Tuple: """simple docstring""" UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase_ ( self ) -> Dict: """simple docstring""" UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, 
self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCamelCase_ ( self ) -> List[str]: """simple docstring""" UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: UpperCAmelCase_ = json.loads(f.read() ) UpperCAmelCase_ = {"image_id": 3_9_7_6_9, "annotations": target} # encode them UpperCAmelCase_ = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) UpperCAmelCase_ = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors="pt" ) # verify pixel values UpperCAmelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ ) UpperCAmelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) ) # verify area UpperCAmelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) ) # verify boxes UpperCAmelCase_ = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ ) UpperCAmelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1e-3 ) ) # verify image_id UpperCAmelCase_ = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) ) # verify is_crowd UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) ) # verify class_labels UpperCAmelCase_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) ) # verify orig_size UpperCAmelCase_ = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) ) # verify size UpperCAmelCase_ = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) ) @slow def lowerCamelCase_ ( self ) -> Dict: """simple docstring""" UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: UpperCAmelCase_ = json.loads(f.read() ) UpperCAmelCase_ = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} UpperCAmelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them UpperCAmelCase_ = ConditionalDetrImageProcessor(format="coco_panoptic" ) UpperCAmelCase_ = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors="pt" ) # verify pixel values UpperCAmelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ ) UpperCAmelCase_ = torch.tensor([0.2796, 0.3138, 
0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) ) # verify area UpperCAmelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) ) # verify boxes UpperCAmelCase_ = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ ) UpperCAmelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1e-3 ) ) # verify image_id UpperCAmelCase_ = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) ) # verify is_crowd UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) ) # verify class_labels UpperCAmelCase_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) ) # verify masks UpperCAmelCase_ = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase__ ) # verify orig_size UpperCAmelCase_ = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) ) # verify size UpperCAmelCase_ = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) )
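A hedged usage sketch of the processor those tests cover; the checkpoint and fixture path are the same ones the slow tests use, and the printed shape matches their expectation.
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])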
660
'''simple docstring''' from typing import List from .keymap import KEYMAP, get_character def lowerCamelCase__ ( A_ ): def decorator(A_ ): UpperCAmelCase_ = getattr(A_ , "handle_key" , [] ) handle += [key] setattr(A_ , "handle_key" , A_ ) return func return decorator def lowerCamelCase__ ( *A_ ): def decorator(A_ ): UpperCAmelCase_ = getattr(A_ , "handle_key" , [] ) handle += keys setattr(A_ , "handle_key" , A_ ) return func return decorator class lowercase_ ( _A ): def __new__( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: """simple docstring""" UpperCAmelCase_ = super().__new__(cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if not hasattr(UpperCamelCase__ , "key_handler" ): setattr(UpperCamelCase__ , "key_handler" , {} ) setattr(UpperCamelCase__ , "handle_input" , KeyHandler.handle_input ) for value in attrs.values(): UpperCAmelCase_ = getattr(UpperCamelCase__ , "handle_key" , [] ) for key in handled_keys: UpperCAmelCase_ = value return new_cls @staticmethod def lowerCamelCase_ ( cls ) -> str: """simple docstring""" UpperCAmelCase_ = get_character() if char != KEYMAP["undefined"]: UpperCAmelCase_ = ord(UpperCamelCase__ ) UpperCAmelCase_ = cls.key_handler.get(UpperCamelCase__ ) if handler: UpperCAmelCase_ = char return handler(cls ) else: return None def lowerCamelCase__ ( cls ): return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
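A hedged usage sketch of the machinery above. The dump obfuscates the names; the upstream accelerate source calls the first decorator `mark`, the metaclass `KeyHandler` and the bottom wrapper `register`, and those names are assumed here.
@register
class Menu:
    @mark(KEYMAP["up"])          # tag the method with the key it answers to
    def move_up(cls):            # `cls` receives the instance via handle_input
        return "up"

menu = Menu()
choice = menu.handle_input()     # blocks for one keypress; dispatches to move_up on the up arrow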
660
1
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class UpperCAmelCase__ ( snake_case__ ): snake_case_ = 42 snake_case_ = None def lowercase ( _a ,_a=0.999 ,_a="cosine" ,) -> str: if alpha_transform_type == "cosine": def alpha_bar_fn(_a ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_a ): return math.exp(t * -12.0 ) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" ) UpperCAmelCase_: List[str] = [] for i in range(_a ): UpperCAmelCase_: Optional[Any] = i / num_diffusion_timesteps UpperCAmelCase_: Optional[int] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_a ) / alpha_bar_fn(_a ) ,_a ) ) return torch.tensor(_a ,dtype=torch.floataa ) class UpperCAmelCase__ ( snake_case__ , snake_case__ ): @register_to_config def __init__( self , A__ = 1000 , A__ = "fixed_small_log" , A__ = True , A__ = 1.0 , A__ = "epsilon" , A__ = "squaredcos_cap_v2" , ): """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) UpperCAmelCase_: List[Any] = betas_for_alpha_bar(A__ ) UpperCAmelCase_: str = 1.0 - self.betas UpperCAmelCase_: Any = torch.cumprod(self.alphas , dim=0 ) UpperCAmelCase_: int = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCAmelCase_: str = 1.0 # setable values UpperCAmelCase_: Tuple = None UpperCAmelCase_: List[str] = torch.from_numpy(np.arange(0 , A__ )[::-1].copy() ) UpperCAmelCase_: Any = variance_type def snake_case_ ( self , A__ , A__ = None ): """simple docstring""" return sample def snake_case_ ( self , A__ , A__ = None ): """simple docstring""" UpperCAmelCase_: Tuple = num_inference_steps UpperCAmelCase_: Optional[int] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCAmelCase_: Optional[int] = (np.arange(0 , A__ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCAmelCase_: int = torch.from_numpy(A__ ).to(A__ ) def snake_case_ ( self , A__ , A__=None , A__=None , A__=None ): """simple docstring""" if prev_timestep is None: UpperCAmelCase_: Dict = t - 1 UpperCAmelCase_: Any = self.alphas_cumprod[t] UpperCAmelCase_: Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase_: Optional[int] = 1 - alpha_prod_t UpperCAmelCase_: Dict = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase_: List[str] = self.betas[t] else: UpperCAmelCase_: Optional[Any] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase_: List[Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCAmelCase_: int = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCAmelCase_: str = torch.log(torch.clamp(A__ , min=1E-20 ) ) UpperCAmelCase_: List[Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM 
scheduler UpperCAmelCase_: Tuple = variance.log() UpperCAmelCase_: List[str] = beta.log() UpperCAmelCase_: Optional[int] = (predicted_variance + 1) / 2 UpperCAmelCase_: Any = frac * max_log + (1 - frac) * min_log return variance def snake_case_ ( self , A__ , A__ , A__ , A__ = None , A__=None , A__ = True , ): """simple docstring""" UpperCAmelCase_: str = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCAmelCase_: Optional[int] = torch.split(A__ , sample.shape[1] , dim=1 ) else: UpperCAmelCase_: Dict = None # 1. compute alphas, betas if prev_timestep is None: UpperCAmelCase_: Any = t - 1 UpperCAmelCase_: Union[str, Any] = self.alphas_cumprod[t] UpperCAmelCase_: Any = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase_: Any = 1 - alpha_prod_t UpperCAmelCase_: List[Any] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase_: Union[str, Any] = self.betas[t] UpperCAmelCase_: Any = self.alphas[t] else: UpperCAmelCase_: List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev UpperCAmelCase_: int = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase_: Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase_: Dict = model_output else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase_: Any = torch.clamp( A__ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_: Any = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCAmelCase_: Tuple = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase_: Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCAmelCase_: Optional[int] = 0 if t > 0: UpperCAmelCase_: List[Any] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=A__ , device=model_output.device ) UpperCAmelCase_: Any = self._get_variance( A__ , predicted_variance=A__ , prev_timestep=A__ , ) if self.variance_type == "fixed_small_log": UpperCAmelCase_: Tuple = variance elif self.variance_type == "learned_range": UpperCAmelCase_: Any = (0.5 * variance).exp() else: raise ValueError( F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`" " for the UnCLIPScheduler." 
) UpperCAmelCase_: Union[str, Any] = variance * variance_noise UpperCAmelCase_: Tuple = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=A__ , pred_original_sample=A__ ) def snake_case_ ( self , A__ , A__ , A__ , ): """simple docstring""" UpperCAmelCase_: List[str] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCAmelCase_: List[str] = timesteps.to(original_samples.device ) UpperCAmelCase_: Optional[Any] = alphas_cumprod[timesteps] ** 0.5 UpperCAmelCase_: Optional[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase_: Any = sqrt_alpha_prod.unsqueeze(-1 ) UpperCAmelCase_: Any = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCAmelCase_: List[Any] = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase_: Tuple = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCAmelCase_: int = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
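A minimal sampling-loop sketch showing where the scheduler above plugs in, assuming the upstream names `UnCLIPScheduler`, `set_timesteps` and `step` for the obfuscated identifiers; `model` stands in for any epsilon-predicting UNet.
import torch

scheduler = UnCLIPScheduler()
scheduler.set_timesteps(25)                 # builds scheduler.timesteps
sample = torch.randn(1, 3, 64, 64)          # start from pure noise
for t in scheduler.timesteps:
    model_output = model(sample, t)         # assumption: epsilon prediction
    sample = scheduler.step(model_output, t, sample).prev_sample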
718
_lowerCAmelCase = 9.8_06_65 def lowercase ( _a ,_a ,_a = g ) -> float: if fluid_density <= 0: raise ValueError("Impossible fluid density" ) if volume < 0: raise ValueError("Impossible object volume" ) if gravity <= 0: raise ValueError("Impossible gravity" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
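A quick worked example for the buoyant-force helper above (upstream name `archimedes_principle`, assumed here since the dump renames it): two litres of fresh water.
# F = rho * g * V = 1000 kg/m^3 * 9.80665 m/s^2 * 0.002 m^3
print(archimedes_principle(fluid_density=1000, volume=0.002))  # 19.6133 N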
306
0
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter A__ : List[Any] = True except ImportError: A__ : List[Any] = False A__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCamelCase( __UpperCamelCase : Namespace ): return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path ) class __snake_case ( UpperCamelCase_ ): @staticmethod def UpperCAmelCase__ ( A_ : ArgumentParser): lowerCAmelCase_ : Optional[Any] = parser.add_parser('''add-new-model''') add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''') add_new_model_parser.add_argument('''--testing_file''' , type=A_ , help='''Configuration file on which to run.''') add_new_model_parser.add_argument( '''--path''' , type=A_ , help='''Path to cookiecutter. Should only be used for testing purposes.''') add_new_model_parser.set_defaults(func=A_) def __init__( self : List[str] , A_ : bool , A_ : str , A_ : Any=None , *A_ : str): lowerCAmelCase_ : str = testing lowerCAmelCase_ : Optional[Any] = testing_file lowerCAmelCase_ : str = path def UpperCAmelCase__ ( self : List[str]): warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''') if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''') # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory lowerCAmelCase_ : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:2_2]] if len(A_) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. 
''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''') lowerCAmelCase_ : Optional[int] = ( Path(A_).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent ) lowerCAmelCase_ : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(A_)) else: with open(self._testing_file , '''r''') as configuration_file: lowerCAmelCase_ : int = json.load(A_) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path) , no_input=A_ , extra_context=A_ , ) lowerCAmelCase_ : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:2_2]][0] # Retrieve configuration with open(directory + '''/configuration.json''' , '''r''') as configuration_file: lowerCAmelCase_ : List[str] = json.load(A_) lowerCAmelCase_ : List[Any] = configuration['''lowercase_modelname'''] lowerCAmelCase_ : List[Any] = configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(F"""{directory}/configuration.json""") lowerCAmelCase_ : int = '''PyTorch''' in generate_tensorflow_pytorch_and_flax lowerCAmelCase_ : List[Any] = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax lowerCAmelCase_ : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax lowerCAmelCase_ : List[Any] = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}""" os.makedirs(A_ , exist_ok=A_) os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=A_) # Tests require submodules as they have parent imports with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w'''): pass shutil.move( F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , ) shutil.move( F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , ) def remove_copy_lines(A_ : Union[str, Any]): with open(A_ , '''r''') as f: lowerCAmelCase_ : Optional[int] = f.readlines() with open(A_ , '''w''') as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(A_) if output_pytorch: if not self._testing: remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""") shutil.move( F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , ) else: os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""") os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""") if output_tensorflow: if not self._testing: remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""") shutil.move( F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , ) else: os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""") os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""") if output_flax: if not self._testing: remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""") shutil.move( F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , ) else: os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""") os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""") shutil.move( F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , ) shutil.move( F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , ) shutil.move( F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(A_ : str , A_ : str , A_ : List[str]): # Create temp file lowerCAmelCase_ , lowerCAmelCase_ : str = mkstemp() lowerCAmelCase_ : List[str] = False with fdopen(A_ , '''w''') as new_file: with open(A_) as old_file: for line in old_file: new_file.write(A_) if line_to_copy_below in line: lowerCAmelCase_ : Optional[Any] = True for line_to_copy in lines_to_copy: new_file.write(A_) if not line_found: raise ValueError(F"""Line {line_to_copy_below} was not found in file.""") # Copy the file permissions from the old file to the new file copymode(A_ , A_) # Remove original file remove(A_) # Move new file move(A_ , A_) def skip_units(A_ : Optional[int]): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(A_ : Dict): with open(A_) as datafile: lowerCAmelCase_ : Dict = [] lowerCAmelCase_ : Any = False lowerCAmelCase_ : Dict = False for line in datafile: if "# To replace in: " in line and "##" not in line: lowerCAmelCase_ : Tuple = line.split('''"''')[1] lowerCAmelCase_ : Union[str, Any] = skip_units(A_) elif "# Below: " in line and "##" not in line: lowerCAmelCase_ : List[str] = 
line.split('''"''')[1] lowerCAmelCase_ : List[str] = skip_units(A_) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(A_ , A_ , A_) lowerCAmelCase_ : Any = [] elif "# Replace with" in line and "##" not in line: lowerCAmelCase_ : str = [] elif "##" not in line: lines_to_copy.append(A_) remove(A_) replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""") os.rmdir(A_)
171
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys A__ : str = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') A__ : Union[str, Any] = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split() A__ : Any = '''|'''.join(sys.argv[1:]) A__ : Tuple = re.compile(RF'''^({joined_dirs}).*?\.py$''') A__ : List[str] = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
171
1
from collections import namedtuple lowerCAmelCase_ = namedtuple('''from_to''', '''from_ to''') lowerCAmelCase_ = { '''cubicmeter''': from_to(1, 1), '''litre''': from_to(0.0_01, 1000), '''kilolitre''': from_to(1, 1), '''gallon''': from_to(0.0_04_54, 264.172), '''cubicyard''': from_to(0.7_64_55, 1.3_07_95), '''cubicfoot''': from_to(0.0_28, 35.31_47), '''cup''': from_to(0.0_00_23_65_88, 4226.75), } def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float: if from_type not in METRIC_CONVERSION: raise ValueError( F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n""" + """, """.join(__SCREAMING_SNAKE_CASE ) ) if to_type not in METRIC_CONVERSION: raise ValueError( F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n""" + """, """.join(__SCREAMING_SNAKE_CASE ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
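A worked example for the converter above (upstream name `volume_conversion`, assumed here): four cubic metres expressed in litres.
# 4 * METRIC_CONVERSION["cubicmeter"].from_ * METRIC_CONVERSION["litre"].to
# = 4 * 1 * 1000
print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0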
703
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
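The property at the end of the config computes the feature encoder's total downsampling factor as the product of the convolutional strides; with the default strides above that is 320, as this small check shows.
import functools, operator

strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, strides, 1))  # 320, i.e. one frame per 320 input samples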
635
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE ( metaclass=a_ ): """simple docstring""" lowercase__ = ["torch", "torchsde"] def __init__( self : str ,*lowercase_ : Optional[int] ,**lowercase_ : Union[str, Any] ): requires_backends(self ,['''torch''', '''torchsde'''] ) @classmethod def __lowerCAmelCase ( cls : str ,*lowercase_ : int ,**lowercase_ : str ): requires_backends(cls ,['''torch''', '''torchsde'''] ) @classmethod def __lowerCAmelCase ( cls : Dict ,*lowercase_ : Union[str, Any] ,**lowercase_ : Optional[int] ): requires_backends(cls ,['''torch''', '''torchsde'''] )
450
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = { '''tensor(bool)''': np.bool_, '''tensor(int8)''': np.inta, '''tensor(uint8)''': np.uinta, '''tensor(int16)''': np.intaa, '''tensor(uint16)''': np.uintaa, '''tensor(int32)''': np.intaa, '''tensor(uint32)''': np.uintaa, '''tensor(int64)''': np.intaa, '''tensor(uint64)''': np.uintaa, '''tensor(float16)''': np.floataa, '''tensor(float)''': np.floataa, '''tensor(double)''': np.floataa, } class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[Any] ,lowercase_ : Union[str, Any]=None ,**lowercase_ : List[str] ): logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' ) lowerCAmelCase__ : List[str] = model lowerCAmelCase__ : Optional[Any] = kwargs.get('''model_save_dir''' ,lowercase_ ) lowerCAmelCase__ : str = kwargs.get('''latest_model_name''' ,lowercase_ ) def __call__( self : Any ,**lowercase_ : Dict ): lowerCAmelCase__ : List[Any] = {k: np.array(lowercase_ ) for k, v in kwargs.items()} return self.model.run(lowercase_ ,lowercase_ ) @staticmethod def __lowerCAmelCase ( lowercase_ : Union[str, Path] ,lowercase_ : str=None ,lowercase_ : str=None ): if provider is None: logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' ) lowerCAmelCase__ : Optional[Any] = '''CPUExecutionProvider''' return ort.InferenceSession(lowercase_ ,providers=[provider] ,sess_options=lowercase_ ) def __lowerCAmelCase ( self : Dict ,lowercase_ : Union[str, Path] ,lowercase_ : Optional[str] = None ,**lowercase_ : str ): lowerCAmelCase__ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME lowerCAmelCase__ : Tuple = self.model_save_dir.joinpath(self.latest_model_name ) lowerCAmelCase__ : List[Any] = Path(lowercase_ ).joinpath(lowercase_ ) try: shutil.copyfile(lowercase_ ,lowercase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) lowerCAmelCase__ : Dict = self.model_save_dir.joinpath(lowercase_ ) if src_path.exists(): lowerCAmelCase__ : Union[str, Any] = Path(lowercase_ ).joinpath(lowercase_ ) try: shutil.copyfile(lowercase_ ,lowercase_ ) except shutil.SameFileError: pass def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Union[str, os.PathLike] ,**lowercase_ : Union[str, Any] ,): if os.path.isfile(lowercase_ ): logger.error(F'Provided path ({save_directory}) should be a directory, not a file' ) return os.makedirs(lowercase_ ,exist_ok=lowercase_ ) # saving model weights/files self._save_pretrained(lowercase_ ,**lowercase_ ) @classmethod def __lowerCAmelCase ( cls : Dict ,lowercase_ : Union[str, Path] ,lowercase_ : Optional[Union[bool, str, None]] = None ,lowercase_ : Optional[Union[str, None]] = None ,lowercase_ : bool = False ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional["ort.SessionOptions"] = None ,**lowercase_ : Optional[int] ,): lowerCAmelCase__ : Optional[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowercase_ ): lowerCAmelCase__ : str = OnnxRuntimeModel.load_model( os.path.join(lowercase_ ,lowercase_ ) ,provider=lowercase_ 
,sess_options=lowercase_ ) lowerCAmelCase__ : Any = Path(lowercase_ ) # load model from hub else: # download model lowerCAmelCase__ : Optional[Any] = hf_hub_download( repo_id=lowercase_ ,filename=lowercase_ ,use_auth_token=lowercase_ ,revision=lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,) lowerCAmelCase__ : str = Path(lowercase_ ).parent lowerCAmelCase__ : Union[str, Any] = Path(lowercase_ ).name lowerCAmelCase__ : Optional[Any] = OnnxRuntimeModel.load_model(lowercase_ ,provider=lowercase_ ,sess_options=lowercase_ ) return cls(model=lowercase_ ,**lowercase_ ) @classmethod def __lowerCAmelCase ( cls : Any ,lowercase_ : Union[str, Path] ,lowercase_ : bool = True ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,**lowercase_ : Any ,): lowerCAmelCase__ : Union[str, Any] = None if len(str(lowercase_ ).split('''@''' ) ) == 2: lowerCAmelCase__ ,lowerCAmelCase__ : str = model_id.split('''@''' ) return cls._from_pretrained( model_id=lowercase_ ,revision=lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,use_auth_token=lowercase_ ,**lowercase_ ,)
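A hedged usage sketch for the wrapper above (upstream name `OnnxRuntimeModel`, assumed here); the model id and input shape are illustrative only.
import numpy as np

model = OnnxRuntimeModel.from_pretrained(
    "some-org/some-onnx-checkpoint", provider="CPUExecutionProvider"
)
outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))  # kwargs become the session feed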
450
1
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowerCAmelCase ( ): """simple docstring""" __A = { '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 2_0, '''a ''' * 3_0, '''b ''' * 7], } __A = Dataset.from_dict(__UpperCamelCase ) return dataset class snake_case ( _lowerCAmelCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = get_dataset() __A = make_duplicate_clusters(_lowerCamelCase, 0.85 ) self.assertEqual(len(duplicate_clusters[0] ), 2 ) def _SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' __A = get_dataset() __A , __A = deduplicate_dataset(_lowerCamelCase ) self.assertEqual(len(_lowerCamelCase ), 2 ) print(_lowerCamelCase ) self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], _lowerCamelCase )
215
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : Optional[torch.FloatTensor] = None A_ : torch.FloatTensor = None A_ : Optional[Tuple[torch.FloatTensor]] = None A_ : Optional[Tuple[torch.FloatTensor]] = None class snake_case ( _lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple, _lowerCamelCase : List[str]=1, _lowerCamelCase : Union[str, Any]=0, _lowerCamelCase : List[str]=2, _lowerCamelCase : Optional[int]=5_12, _lowerCamelCase : Optional[Any]="cls", _lowerCamelCase : List[str]=False, _lowerCamelCase : Optional[Any]=True, **_lowerCamelCase : Any, ): '''simple docstring''' super().__init__(pad_token_id=_lowerCamelCase, bos_token_id=_lowerCamelCase, eos_token_id=_lowerCamelCase, **_lowerCamelCase ) __A = project_dim __A = pooler_fn __A = learn_encoder __A = use_attention_mask class snake_case ( _lowerCAmelCase ): '''simple docstring''' A_ : int = [R"pooler", R"logit_scale"] A_ : List[Any] = [R"position_ids", R"predictions.decoder.bias"] A_ : Union[str, Any] = "roberta" A_ : Dict = RobertaSeriesConfig def __init__( self : Optional[Any], _lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(_lowerCamelCase ) __A = XLMRobertaModel(_lowerCamelCase ) __A = nn.Linear(config.hidden_size, config.project_dim ) __A = getattr(_lowerCamelCase, '''has_pre_transformation''', _lowerCamelCase ) if self.has_pre_transformation: __A = nn.Linear(config.hidden_size, config.project_dim ) __A = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps ) self.post_init() def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[torch.Tensor] = None, _lowerCamelCase : Optional[bool] = None, _lowerCamelCase : Optional[bool] = None, _lowerCamelCase : Optional[bool] = None, ): '''simple docstring''' __A = return_dict if return_dict is not None else self.config.use_return_dict __A = self.base_model( input_ids=_lowerCamelCase, attention_mask=_lowerCamelCase, token_type_ids=_lowerCamelCase, position_ids=_lowerCamelCase, head_mask=_lowerCamelCase, inputs_embeds=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, output_attentions=_lowerCamelCase, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=_lowerCamelCase, ) if self.has_pre_transformation: __A = outputs['''hidden_states'''][-2] __A = self.pre_LN(_lowerCamelCase ) __A = self.transformation_pre(_lowerCamelCase ) return TransformationModelOutput( projection_state=_lowerCamelCase, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: __A = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=_lowerCamelCase, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
215
1
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowercase__ = { """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), """bert""": (BertConfig, BertForMaskedLM, BertTokenizer), """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __lowerCamelCase ( __UpperCamelCase ) -> int: """simple docstring""" assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Any: """simple docstring""" if args.student_type == "roberta": lowerCAmelCase_ : Optional[int] = False elif args.student_type == "gpt2": lowerCAmelCase_ : List[str] = False def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: """simple docstring""" if args.student_type == "roberta": lowerCAmelCase_ : Optional[int] = False def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" lowerCAmelCase_ : int = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=__UpperCamelCase , required=__UpperCamelCase , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=__UpperCamelCase , required=__UpperCamelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=__UpperCamelCase , choices=["distilbert", "roberta", "gpt2"] , required=__UpperCamelCase , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=__UpperCamelCase , type=__UpperCamelCase , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=__UpperCamelCase , help="Teacher type (BERT, RoBERTa)." 
) parser.add_argument("--teacher_name" , type=__UpperCamelCase , required=__UpperCamelCase , help="The teacher model." ) parser.add_argument("--temperature" , default=2.0 , type=__UpperCamelCase , help="Temperature for the softmax temperature." ) parser.add_argument( "--alpha_ce" , default=0.5 , type=__UpperCamelCase , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=__UpperCamelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=__UpperCamelCase , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=__UpperCamelCase , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=__UpperCamelCase , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=__UpperCamelCase , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=__UpperCamelCase , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=__UpperCamelCase , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=__UpperCamelCase , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=__UpperCamelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=__UpperCamelCase , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=__UpperCamelCase , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=__UpperCamelCase , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=__UpperCamelCase , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=__UpperCamelCase , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=__UpperCamelCase , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5e-4 , type=__UpperCamelCase , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1e-6 , type=__UpperCamelCase , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=__UpperCamelCase , help="Max gradient norm." 
) parser.add_argument("--initializer_range" , default=0.02 , type=__UpperCamelCase , help="Random initialization range." ) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=__UpperCamelCase , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=__UpperCamelCase , default=1 , help="Number of GPUs in the node." ) parser.add_argument("--local_rank" , type=__UpperCamelCase , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=__UpperCamelCase , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=__UpperCamelCase , default=500 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=__UpperCamelCase , default=4000 , help="Checkpoint interval." ) lowerCAmelCase_ : List[str] = parser.parse_args() sanity_checks(__UpperCamelCase ) # ARGS # init_gpu_params(__UpperCamelCase ) set_seed(__UpperCamelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(__UpperCamelCase ) , __UpperCamelCase , indent=4 ) git_log(args.dump_path ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = MODEL_CLASSES[args.student_type] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowerCAmelCase_ : Union[str, Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) lowerCAmelCase_ : Any = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowerCAmelCase_ : str = tokenizer.all_special_tokens.index(__UpperCamelCase ) lowerCAmelCase_ : Optional[Any] = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) lowerCAmelCase_ : Union[str, Any] = special_tok_ids lowerCAmelCase_ : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , "rb" ) as fp: lowerCAmelCase_ : Optional[int] = pickle.load(__UpperCamelCase ) if args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , "rb" ) as fp: lowerCAmelCase_ : List[str] = pickle.load(__UpperCamelCase ) lowerCAmelCase_ : Union[str, Any] = np.maximum(__UpperCamelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): lowerCAmelCase_ : str = 0.0 # do not predict special tokens lowerCAmelCase_ : Tuple = torch.from_numpy(__UpperCamelCase ) else: lowerCAmelCase_ : List[str] = None lowerCAmelCase_ : Union[str, Any] = LmSeqsDataset(params=__UpperCamelCase , data=__UpperCamelCase ) logger.info("Data loader created." 
) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) lowerCAmelCase_ : str = student_config_class.from_pretrained(args.student_config ) lowerCAmelCase_ : Dict = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) lowerCAmelCase_ : Tuple = student_model_class.from_pretrained(args.student_pretrained_weights , config=__UpperCamelCase ) else: lowerCAmelCase_ : Dict = student_model_class(__UpperCamelCase ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info("Student loaded." ) # TEACHER # lowerCAmelCase_ : Optional[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__UpperCamelCase ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__UpperCamelCase , __UpperCamelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__UpperCamelCase , __UpperCamelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowerCAmelCase_ : Any = Distiller( params=__UpperCamelCase , dataset=__UpperCamelCase , token_probs=__UpperCamelCase , student=__UpperCamelCase , teacher=__UpperCamelCase ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
610
"""simple docstring""" lowercase__ = """ # Transformers 설치 방법 ! pip install transformers datasets # 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요. # ! pip install git+https://github.com/huggingface/transformers.git """ lowercase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowercase__ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
610
1
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def lowercase__ ( _UpperCamelCase) -> List[Any]: """simple docstring""" return x + 2 class A__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" UpperCamelCase = 'x = 3' UpperCamelCase = {} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3} ) UpperCamelCase = 'x = y' UpperCamelCase = {'y': 5} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 5, 'y': 5} ) def _SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" UpperCamelCase = 'y = add_two(x)' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {'add_two': add_two} , state=_SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'y': 5} ) # Won't work without the tool with CaptureStdout() as out: UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) assert result is None assert "tried to execute add_two" in out.out def _SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" UpperCamelCase = 'x = 3' UpperCamelCase = {} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" UpperCamelCase = 'test_dict = {\'x\': x, \'y\': add_two(x)}' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {'add_two': add_two} , state=_SCREAMING_SNAKE_CASE ) self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'y': 5} ) self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def _SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" UpperCamelCase = 'x = 3\ny = 5' UpperCamelCase = {} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'y': 5} ) def _SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" UpperCamelCase = 'text = f\'This is x: {x}.\'' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'text': 'This is x: 3.'} ) def _SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" UpperCamelCase = 'if x <= 3:\n y = 2\nelse:\n y = 5' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'y': 2} ) UpperCamelCase = {'x': 8} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 8, 'y': 5} ) def _SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" UpperCamelCase = 'test_list = [x, add_two(x)]' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {'add_two': add_two} , state=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , [3, 5] ) self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'test_list': [3, 5]} ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" UpperCamelCase = 'y = x' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {} , state=_SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'y': 3} ) def _SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" UpperCamelCase = 'test_list = [x, add_two(x)]\ntest_list[1]' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {'add_two': add_two} , state=_SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'test_list': [3, 5]} ) UpperCamelCase = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']' UpperCamelCase = {'x': 3} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {'add_two': add_two} , state=_SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = 'x = 0\nfor i in range(3):\n x = i' UpperCamelCase = {} UpperCamelCase = evaluate(_SCREAMING_SNAKE_CASE , {'range': range} , state=_SCREAMING_SNAKE_CASE ) assert result == 2 self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'x': 2, 'i': 2} )
410
from __future__ import annotations import queue class A__ : '''simple docstring''' def __init__( self : str , _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" UpperCamelCase = data UpperCamelCase = None UpperCamelCase = None def lowercase__ ( ) -> TreeNode: """simple docstring""" print('\n********Press N to stop entering at any point of time********\n') UpperCamelCase = input('Enter the value of the root node: ').strip().lower() UpperCamelCase = queue.Queue() UpperCamelCase = TreeNode(int(_UpperCamelCase)) q.put(_UpperCamelCase) while not q.empty(): UpperCamelCase = q.get() UpperCamelCase = F'Enter the left node of {node_found.data}: ' UpperCamelCase = input(_UpperCamelCase).strip().lower() or 'n' if check == "n": return tree_node UpperCamelCase = TreeNode(int(_UpperCamelCase)) UpperCamelCase = left_node q.put(_UpperCamelCase) UpperCamelCase = F'Enter the right node of {node_found.data}: ' UpperCamelCase = input(_UpperCamelCase).strip().lower() or 'n' if check == "n": return tree_node UpperCamelCase = TreeNode(int(_UpperCamelCase)) UpperCamelCase = right_node q.put(_UpperCamelCase) raise def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: return print(node.data , end=',') pre_order(node.left) pre_order(node.right) def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: return in_order(node.left) print(node.data , end=',') in_order(node.right) def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: return post_order(node.left) post_order(node.right) print(node.data , end=',') def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: return UpperCamelCase = queue.Queue() q.put(_UpperCamelCase) while not q.empty(): UpperCamelCase = q.get() print(node_dequeued.data , end=',') if node_dequeued.left: q.put(node_dequeued.left) if node_dequeued.right: q.put(node_dequeued.right) def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: return UpperCamelCase = queue.Queue() q.put(_UpperCamelCase) while not q.empty(): UpperCamelCase = [] while not q.empty(): UpperCamelCase = q.get() print(node_dequeued.data , end=',') if node_dequeued.left: list_.append(node_dequeued.left) if node_dequeued.right: list_.append(node_dequeued.right) print() for node in list_: q.put(_UpperCamelCase) def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: return UpperCamelCase = [] UpperCamelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=',') stack.append(_UpperCamelCase) UpperCamelCase = n.left # end of while means current node doesn't have left child UpperCamelCase = stack.pop() # start to traverse its right child UpperCamelCase = n.right def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: return UpperCamelCase = [] UpperCamelCase = node while n or stack: while n: stack.append(_UpperCamelCase) UpperCamelCase = n.left UpperCamelCase = stack.pop() print(n.data , end=',') UpperCamelCase = n.right def lowercase__ ( _UpperCamelCase) -> None: """simple docstring""" if not isinstance(_UpperCamelCase , _UpperCamelCase) or not node: 
return UpperCamelCase , UpperCamelCase = [], [] UpperCamelCase = node stacka.append(_UpperCamelCase) while stacka: # to find the reversed order of post order, store it in stack2 UpperCamelCase = stacka.pop() if n.left: stacka.append(n.left) if n.right: stacka.append(n.right) stacka.append(_UpperCamelCase) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=',') def lowercase__ ( _UpperCamelCase = "" , _UpperCamelCase=50 , _UpperCamelCase="*") -> str: """simple docstring""" if not s: return "\n" + width * char UpperCamelCase , UpperCamelCase = divmod(width - len(_UpperCamelCase) - 2 , 2) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) __magic_name__ : TreeNode = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 50 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
410
1
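A minimal, self-contained sketch of the queue-based level-order traversal used in the tree sample above. TreeNode and level_order are re-declared here (standalone names chosen for this snippet) so it runs without the interactive build_tree prompts:

import queue


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: "TreeNode | None" = None
        self.right: "TreeNode | None" = None


def level_order(node: "TreeNode | None") -> None:
    # breadth-first walk using the same queue-based approach as the sample
    if node is None:
        return
    q: "queue.Queue[TreeNode]" = queue.Queue()
    q.put(node)
    while not q.empty():
        current = q.get()
        print(current.data, end=",")
        if current.left:
            q.put(current.left)
        if current.right:
            q.put(current.right)


root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left = TreeNode(4)
level_order(root)  # prints: 1,2,3,4,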
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand A_ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCamelCase__ ( __magic_name__ : str ) -> Union[str, Any]: '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(__magic_name__ ): return ext raise Exception( f"Unable to determine file format from file extension {path}. " f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" ) def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> int: '''simple docstring''' snake_case__ : Optional[Any] = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) snake_case__ : List[Any] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format snake_case__ : Dict = PipelineDataFormat.from_str( format=__magic_name__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(__magic_name__ , __magic_name__ ) class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): snake_case__ : Optional[int] = nlp snake_case__ : Union[str, Any] = reader @staticmethod def __UpperCamelCase ( __SCREAMING_SNAKE_CASE ): snake_case__ : Optional[Any] = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" ) run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" ) run_parser.add_argument("""--input""" , type=__SCREAMING_SNAKE_CASE , help="""Path to the file to use for inference""" ) run_parser.add_argument("""--output""" , type=__SCREAMING_SNAKE_CASE , help="""Path to the file that will be used post to write results.""" ) run_parser.add_argument("""--model""" , type=__SCREAMING_SNAKE_CASE , help="""Name or path to the model to instantiate.""" ) run_parser.add_argument("""--config""" , type=__SCREAMING_SNAKE_CASE , help="""Name or path to the model's config to instantiate.""" ) run_parser.add_argument( """--tokenizer""" , type=__SCREAMING_SNAKE_CASE , help="""Name of the tokenizer to use. (default: same as the model name)""" ) run_parser.add_argument( """--column""" , type=__SCREAMING_SNAKE_CASE , help="""Name of the column to use as input. 
(For multi columns input as QA use column1,columns2)""" , ) run_parser.add_argument( """--format""" , type=__SCREAMING_SNAKE_CASE , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , ) run_parser.add_argument( """--device""" , type=__SCREAMING_SNAKE_CASE , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , ) run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" ) run_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( self ): snake_case__ , snake_case__ : List[Any] = self._nlp, [] for entry in self._reader: snake_case__ : Tuple = nlp(**__SCREAMING_SNAKE_CASE ) if self._reader.is_multi_columns else nlp(__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): outputs.append(__SCREAMING_SNAKE_CASE ) else: outputs += output # Saving data if self._nlp.binary_output: snake_case__ : int = self._reader.save_binary(__SCREAMING_SNAKE_CASE ) logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" ) else: self._reader.save(__SCREAMING_SNAKE_CASE )
38
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
38
1
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={"vocab_file": "spiece.model"} _lowerCamelCase ={ "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 _lowerCamelCase ={ "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } _lowerCamelCase ="▁" class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = ['input_ids', 'attention_mask'] def __init__( self : str ,snake_case : int ,snake_case : Any="</s>" ,snake_case : Dict="<unk>" ,snake_case : Optional[int]="<pad>" ,snake_case : int=100 ,snake_case : Any=None ,snake_case : Optional[Dict[str, Any]] = None ,snake_case : Optional[int]=True ,**snake_case : str ,): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: SCREAMING_SNAKE_CASE =[f'<extra_id_{i}>' for i in range(snake_case )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens SCREAMING_SNAKE_CASE =len(set(filter(lambda snake_case : bool('extra_id' in str(snake_case ) ) ,snake_case ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) if legacy: logger.warning_once( f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to' ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' ) SCREAMING_SNAKE_CASE =legacy SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=snake_case ,unk_token=snake_case ,pad_token=snake_case ,extra_ids=snake_case ,additional_special_tokens=snake_case ,sp_model_kwargs=self.sp_model_kwargs ,legacy=snake_case ,**snake_case ,) SCREAMING_SNAKE_CASE =vocab_file SCREAMING_SNAKE_CASE =extra_ids SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case ) @staticmethod def _lowerCAmelCase ( snake_case : str ,snake_case : List[Any] ,snake_case : Optional[int] ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: SCREAMING_SNAKE_CASE =TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' ,snake_case ,) return max_model_length @property def _lowerCAmelCase ( self : Union[str, Any] ): return self.sp_model.get_piece_size() + self._extra_ids def _lowerCAmelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE ={self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self : str ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ,snake_case : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case ,token_ids_a=snake_case ,already_has_special_tokens=snake_case ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(snake_case )) + [1] return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1] def _lowerCAmelCase ( self : Optional[int] ): return list( set(filter(lambda snake_case : bool(re.search(r'<extra_id_\d+>' ,snake_case ) ) is not None ,self.additional_special_tokens ) ) ) def _lowerCAmelCase ( self : Any ): return [self._convert_token_to_id(snake_case ) for token in self.get_sentinel_tokens()] def _lowerCAmelCase ( self : Tuple ,snake_case : List[int] ): if len(snake_case ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated' ' eos tokens being added.' 
) return token_ids else: return token_ids + [self.eos_token_id] def _lowerCAmelCase ( self : List[str] ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE =[self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _lowerCAmelCase ( self : Dict ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE =self._add_eos_if_not_present(snake_case ) if token_ids_a is None: return token_ids_a else: SCREAMING_SNAKE_CASE =self._add_eos_if_not_present(snake_case ) return token_ids_a + token_ids_a def __getstate__( self : List[str] ): SCREAMING_SNAKE_CASE =self.__dict__.copy() SCREAMING_SNAKE_CASE =None return state def __setstate__( self : Optional[Any] ,snake_case : int ): SCREAMING_SNAKE_CASE =d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): SCREAMING_SNAKE_CASE ={} SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self : Optional[Any] ,snake_case : "TextInput" ,**snake_case : Optional[Any] ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: SCREAMING_SNAKE_CASE =SPIECE_UNDERLINE + text.replace(snake_case ,' ' ) return super().tokenize(snake_case ,**snake_case ) def _lowerCAmelCase ( self : int ,snake_case : Union[str, Any] ,**snake_case : Tuple ): if not self.legacy: SCREAMING_SNAKE_CASE =text.startswith(snake_case ) if is_first: SCREAMING_SNAKE_CASE =text[1:] SCREAMING_SNAKE_CASE =self.sp_model.encode(snake_case ,out_type=snake_case ) if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case ): SCREAMING_SNAKE_CASE =([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Dict ): if token.startswith('<extra_id_' ): SCREAMING_SNAKE_CASE =re.match(r'<extra_id_(\d+)>' ,snake_case ) SCREAMING_SNAKE_CASE =int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(snake_case ) def _lowerCAmelCase ( self : Optional[int] ,snake_case : int ): if index < self.sp_model.get_piece_size(): SCREAMING_SNAKE_CASE =self.sp_model.IdToPiece(snake_case ) else: SCREAMING_SNAKE_CASE =f'<extra_id_{self.vocab_size - 1 - index}>' return token def _lowerCAmelCase ( self : str ,snake_case : str ): SCREAMING_SNAKE_CASE =[] SCREAMING_SNAKE_CASE ='' SCREAMING_SNAKE_CASE =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case ) + token SCREAMING_SNAKE_CASE =True SCREAMING_SNAKE_CASE =[] else: current_sub_tokens.append(snake_case ) SCREAMING_SNAKE_CASE =False out_string += self.sp_model.decode(snake_case ) return out_string.strip() def _lowerCAmelCase ( self : List[Any] ,snake_case : str ,snake_case : Optional[str] = None ): if not os.path.isdir(snake_case ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return SCREAMING_SNAKE_CASE =os.path.join( snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,snake_case ) elif not os.path.isfile(self.vocab_file ): with 
open(snake_case ,'wb' ) as fi: SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,)
252
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
252
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE_ = { """configuration_clip""": [ """CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CLIPConfig""", """CLIPOnnxConfig""", """CLIPTextConfig""", """CLIPVisionConfig""", ], """processing_clip""": ["""CLIPProcessor"""], """tokenization_clip""": ["""CLIPTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""CLIPTokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ["""CLIPFeatureExtractor"""] SCREAMING_SNAKE_CASE_ = ["""CLIPImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """CLIPModel""", """CLIPPreTrainedModel""", """CLIPTextModel""", """CLIPTextModelWithProjection""", """CLIPVisionModel""", """CLIPVisionModelWithProjection""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCLIPModel""", """TFCLIPPreTrainedModel""", """TFCLIPTextModel""", """TFCLIPVisionModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """FlaxCLIPModel""", """FlaxCLIPPreTrainedModel""", """FlaxCLIPTextModel""", """FlaxCLIPTextPreTrainedModel""", """FlaxCLIPVisionModel""", """FlaxCLIPVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
237
"""simple docstring""" def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> List[str]: return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=0 ) -> Tuple: return sorted(SCREAMING_SNAKE_CASE__, key=lambda SCREAMING_SNAKE_CASE__ : x[column] ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=float("inf" ) ) -> Union[str, Any]: for i in range(points_counts - 1 ): for j in range(i + 1, SCREAMING_SNAKE_CASE__ ): a_ : Union[str, Any] = euclidean_distance_sqr(points[i], points[j] ) if current_dis < min_dis: a_ : Optional[Any] = current_dis return min_dis def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=float("inf" ) ) -> Any: for i in range(min(6, points_counts - 1 ), SCREAMING_SNAKE_CASE__ ): for j in range(max(0, i - 6 ), SCREAMING_SNAKE_CASE__ ): a_ : Tuple = euclidean_distance_sqr(points[i], points[j] ) if current_dis < min_dis: a_ : Any = current_dis return min_dis def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Any: # base case if points_counts <= 3: return dis_between_closest_pair(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # recursion a_ : Optional[int] = points_counts // 2 a_ : Union[str, Any] = closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE__, points_sorted_on_y[:mid], SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE__, points_sorted_on_y[mid:], points_counts - mid ) a_ : Dict = min(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) a_ : Tuple = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = dis_between_closest_in_strip( SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__ ) return min(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: a_ : List[str] = column_based_sort(SCREAMING_SNAKE_CASE__, column=0 ) a_ : Union[str, Any] = column_based_sort(SCREAMING_SNAKE_CASE__, column=1 ) return ( closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ) ** 0.5 if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
237
1
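As a sanity check on the divide-and-conquer sample above, a brute-force O(n^2) scan over the same six points should return the same distance; this sketch uses only the standard library and is independent of the sample's function names:

from itertools import combinations
from math import dist

points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
# the closest pair is (2, 3) and (3, 4), at Euclidean distance sqrt(2)
print(min(dist(p, q) for p, q in combinations(points, 2)))  # 1.4142...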
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Dict = logging.get_logger(__name__) def UpperCAmelCase_ ( A ): _a : Any = SwinConfig( embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=['stage2', 'stage3', 'stage4'] , ) _a : Dict = DetaConfig( backbone_config=UpperCamelCase__ , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=UpperCamelCase__ , with_box_refine=UpperCamelCase__ , two_stage=UpperCamelCase__ , ) # set labels _a : int = 'huggingface/label-files' if "o365" in model_name: _a : List[str] = 3_6_6 _a : Any = 'object365-id2label.json' else: _a : Tuple = 9_1 _a : Tuple = 'coco-detection-id2label.json' _a : Dict = num_labels _a : Union[str, Any] = json.load(open(cached_download(hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) ) , 'r' ) ) _a : Optional[int] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _a : str = idalabel _a : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase_ ( A ): _a : Any = [] # stem # fmt: off rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') ) rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', 
f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') ) rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') ) rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') ) rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') ) rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') ) rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') ) 
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def UpperCAmelCase_ ( A , A , A ): _a : Any = dct.pop(UpperCamelCase__ ) _a : Tuple = val def UpperCAmelCase_ ( A , A ): _a : Tuple = 
[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _a : List[str] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _a : str = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) _a : List[Any] = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _a : Tuple = in_proj_weight[:dim, :] _a : List[Any] = in_proj_bias[: dim] _a : Dict = in_proj_weight[ dim : dim * 2, : ] _a : List[Any] = in_proj_bias[ dim : dim * 2 ] _a : Tuple = in_proj_weight[ -dim :, : ] _a : List[Any] = in_proj_bias[-dim :] # fmt: on def UpperCAmelCase_ ( A , A ): _a : str = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention _a : str = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) _a : Optional[int] = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict _a : int = in_proj_weight[:hidden_size, :] _a : Tuple = in_proj_bias[:hidden_size] _a : Dict = in_proj_weight[ hidden_size : hidden_size * 2, : ] _a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2] _a : int = in_proj_weight[-hidden_size:, :] _a : Any = in_proj_bias[-hidden_size:] def UpperCAmelCase_ ( ): _a : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg' _a : List[str] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def UpperCAmelCase_ ( A , A , A ): _a : Optional[int] = get_deta_config(UpperCamelCase__ ) # load original state dict if model_name == "deta-swin-large": _a : Optional[Any] = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' ) elif model_name == "deta-swin-large-o365": _a : Optional[int] = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' ) else: raise ValueError(f'''Model name {model_name} not supported''' ) _a : Any = torch.load(UpperCamelCase__ , map_location='cpu' )['model'] # original state dict for name, param in state_dict.items(): print(UpperCamelCase__ , param.shape ) # rename keys _a : Any = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: _a : List[str] = state_dict.pop(UpperCamelCase__ ) _a : int = val if "input_proj" in key: _a : Dict = state_dict.pop(UpperCamelCase__ ) _a : List[Any] = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: _a : Any = state_dict.pop(UpperCamelCase__ ) _a : Tuple = val # finally, create HuggingFace model and load state dict _a : Dict = DetaForObjectDetection(UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() _a : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu' model.to(UpperCamelCase__ ) # load image processor _a : Optional[Any] = DetaImageProcessor(format='coco_detection' ) # verify our conversion on image _a : str = 
prepare_img() _a : str = processor(images=UpperCamelCase__ , return_tensors='pt' ) _a : List[Any] = encoding['pixel_values'] _a : str = model(pixel_values.to(UpperCamelCase__ ) ) # verify logits print('Logits:' , outputs.logits[0, :3, :3] ) print('Boxes:' , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": _a : Optional[Any] = torch.tensor( [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] ) _a : List[Any] = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] ) elif model_name == "deta-swin-large-o365": _a : Any = torch.tensor( [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] ) _a : Union[str, Any] = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCamelCase__ ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCamelCase__ ) , atol=1E-4 ) print('Everything ok!' ) if pytorch_dump_folder_path: # Save model and processor logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) # Push to hub if push_to_hub: print('Pushing model and processor to hub...' ) model.push_to_hub(f'''jozhang97/{model_name}''' ) processor.push_to_hub(f'''jozhang97/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="deta-swin-large", choices=["deta-swin-large", "deta-swin-large-o365"], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
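# The fused-projection split performed by read_in_swin_q_k_v and
# read_in_decoder_q_k_v above, isolated for reference: the original
# checkpoint stores a single in_proj matrix of shape (3 * hidden, hidden),
# and the converted model expects separate query/key/value weights. A
# minimal sketch with an illustrative size (the helper name is ours, not DETA's):
def _demo_qkv_split():
    import torch

    hidden = 4
    in_proj_weight = torch.randn(3 * hidden, hidden)
    q, k, v = in_proj_weight.split(hidden, dim=0)
    # identical to the explicit slices used in the read_in_* functions above
    assert torch.equal(q, in_proj_weight[:hidden, :])
    assert torch.equal(k, in_proj_weight[hidden : hidden * 2, :])
    assert torch.equal(v, in_proj_weight[-hidden:, :])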
'''simple docstring'''

import math


def malus_law(initial_intensity: float, angle: float) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')  # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')  # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
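# A quick sanity check of malus_law above: at 0 degrees the polarizer passes
# everything, and cos^2(60 degrees) = 0.25, so a quarter gets through.
assert malus_law(100.0, 0) == 100.0
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9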
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __magic_name__ : str = tuple[int, int] class __snake_case : def __init__( self: Tuple , A_: set[int] , A_: Mapping[EdgeT, int] ): __lowerCamelCase = vertices __lowerCamelCase = { (min(A_ ), max(A_ )): weight for edge, weight in edges.items() } def __a ( self: Optional[int] , A_: EdgeT , A_: int ): self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __lowerCamelCase = weight def __a ( self: Dict ): __lowerCamelCase = Graph({min(self.vertices )} , {} ) __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): __lowerCamelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __lowerCamelCase = edge __lowerCamelCase = weight subgraph.add_edge(A_ , A_ ) return subgraph def a_ ( lowercase__ :str = "p107_network.txt" ): __lowerCamelCase = os.path.abspath(os.path.dirname(lowercase__ ) ) __lowerCamelCase = os.path.join(lowercase__, lowercase__ ) __lowerCamelCase = {} __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = 42 with open(lowercase__ ) as f: __lowerCamelCase = f.read().strip().split("""\n""" ) __lowerCamelCase = [line.split(""",""" ) for line in data] for edgea in range(1, len(lowercase__ ) ): for edgea in range(lowercase__ ): if adjaceny_matrix[edgea][edgea] != "-": __lowerCamelCase = int(adjaceny_matrix[edgea][edgea] ) __lowerCamelCase = Graph(set(range(len(lowercase__ ) ) ), lowercase__ ) __lowerCamelCase = graph.prims_algorithm() __lowerCamelCase = sum(graph.edges.values() ) __lowerCamelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" from __future__ import annotations def a_ ( lowercase__ :list[float] ): if len(lowercase__ ) < 2: raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" ) if any(i <= 0 for i in nums ): raise ValueError("""All values must be greater than 0""" ) __lowerCamelCase = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase ( _a ): _SCREAMING_SNAKE_CASE : Dict =(DDPMScheduler,) def a__ ( self , **lowerCAmelCase__ ): _A= { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**lowerCAmelCase__ ) return config def a__ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def a__ ( self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def a__ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def a__ ( self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase__ ) def a__ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase__ ) def a__ ( self ): self.check_over_configs(thresholding=lowerCAmelCase__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , ) def a__ ( self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def a__ ( self ): for t in [0, 500, 999]: self.check_over_forward(time_step=lowerCAmelCase__ ) def a__ ( self ): _A= self.scheduler_classes[0] _A= self.get_scheduler_config() _A= scheduler_class(**lowerCAmelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def a__ ( self ): _A= self.scheduler_classes[0] _A= self.get_scheduler_config() _A= scheduler_class(**lowerCAmelCase__ ) _A= len(lowerCAmelCase__ ) _A= self.dummy_model() _A= self.dummy_sample_deter _A= torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase__ ) ): # 1. predict noise residual _A= model(lowerCAmelCase__ , lowerCAmelCase__ ) # 2. predict previous mean of sample x_t-1 _A= scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _A= pred_prev_sample _A= torch.sum(torch.abs(lowerCAmelCase__ ) ) _A= torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def a__ ( self ): _A= self.scheduler_classes[0] _A= self.get_scheduler_config(prediction_type='v_prediction' ) _A= scheduler_class(**lowerCAmelCase__ ) _A= len(lowerCAmelCase__ ) _A= self.dummy_model() _A= self.dummy_sample_deter _A= torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase__ ) ): # 1. predict noise residual _A= model(lowerCAmelCase__ , lowerCAmelCase__ ) # 2. 
predict previous mean of sample x_t-1 _A= scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _A= pred_prev_sample _A= torch.sum(torch.abs(lowerCAmelCase__ ) ) _A= torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def a__ ( self ): _A= self.scheduler_classes[0] _A= self.get_scheduler_config() _A= scheduler_class(**lowerCAmelCase__ ) _A= [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase__ ) _A= scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase__ ): if i == len(lowerCAmelCase__ ) - 1: _A= -1 else: _A= timesteps[i + 1] _A= scheduler.previous_timestep(lowerCAmelCase__ ) _A= prev_t.item() self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def a__ ( self ): _A= self.scheduler_classes[0] _A= self.get_scheduler_config() _A= scheduler_class(**lowerCAmelCase__ ) _A= [100, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase__ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=lowerCAmelCase__ ) def a__ ( self ): _A= self.scheduler_classes[0] _A= self.get_scheduler_config() _A= scheduler_class(**lowerCAmelCase__ ) _A= [100, 87, 50, 1, 0] _A= len(lowerCAmelCase__ ) with self.assertRaises(lowerCAmelCase__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase__ , timesteps=lowerCAmelCase__ ) def a__ ( self ): _A= self.scheduler_classes[0] _A= self.get_scheduler_config() _A= scheduler_class(**lowerCAmelCase__ ) _A= [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=lowerCAmelCase__ )
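# A hedged sketch of the custom-timesteps API exercised above, assuming a
# diffusers version whose DDPMScheduler.set_timesteps accepts `timesteps=`
# (as these tests imply); the helper name is ours:
def _demo_custom_timesteps():
    from diffusers import DDPMScheduler

    sched = DDPMScheduler(num_train_timesteps=1000)
    sched.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
    assert sched.timesteps.tolist() == [100, 87, 50, 1, 0]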
UpperCAmelCase_ = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
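# The table above maps a bare package name to its full version pin. A sketch
# of how such a table is typically consumed when assembling requirements
# (the helper mirrors the deps_list pattern in transformers' setup.py and is
# illustrative here, not part of this module):
def deps_list(deps, *pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[p] for p in pkgs]

# e.g. deps_list(UpperCAmelCase_, "numpy", "tqdm") -> ["numpy>=1.17", "tqdm>=4.27"]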
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case__ ( __snake_case ): '''simple docstring''' def UpperCamelCase ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 8 # DPR tok UpperCAmelCase_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCAmelCase_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) UpperCAmelCase_ = os.path.join(lowerCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase_ = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) UpperCAmelCase_ = os.path.join(lowerCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(lowerCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase_ ) ) def UpperCamelCase ( self : Union[str, Any] ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def UpperCamelCase ( self : str ) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def UpperCamelCase ( self : str ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def UpperCamelCase ( self : Dict ) -> List[str]: shutil.rmtree(self.tmpdirname ) def UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ = Dataset.from_dict( { 
'''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCamelCase ( self : Dict ) -> List[str]: UpperCAmelCase_ = self.get_dummy_dataset() UpperCAmelCase_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: UpperCAmelCase_ = dataset UpperCAmelCase_ = RagRetriever( lowerCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCamelCase ( self : List[str] , lowerCAmelCase_ : bool ) -> Tuple: UpperCAmelCase_ = self.get_dummy_dataset() UpperCAmelCase_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: UpperCAmelCase_ = os.path.join(self.tmpdirname , '''dataset''' ) UpperCAmelCase_ = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset UpperCAmelCase_ = RagRetriever( lowerCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: UpperCAmelCase_ = RagRetriever( lowerCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowerCAmelCase_ ) , ) return retriever def UpperCamelCase ( self : List[Any] ) -> str: UpperCAmelCase_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCAmelCase_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) UpperCAmelCase_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(lowerCAmelCase_ , open(lowerCAmelCase_ , '''wb''' ) ) UpperCAmelCase_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) UpperCAmelCase_ = RagRetriever( lowerCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ = 1 UpperCAmelCase_ = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_, 
UpperCAmelCase_, UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=lowerCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: UpperCAmelCase_ = self.get_dummy_dataset() retriever.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ = RagRetriever.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ = 1 UpperCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase_ ) UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=lowerCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase ( self : int ) -> List[Any]: UpperCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ = RagRetriever.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase ( self : List[str] ) -> int: UpperCAmelCase_ = 1 UpperCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase_ ) UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=lowerCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) 
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowerCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase ( self : List[Any] ) -> Dict: UpperCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ = RagRetriever.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCamelCase ( self : List[str] ) -> str: UpperCAmelCase_ = 1 UpperCAmelCase_ = self.get_dummy_legacy_index_retriever() UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=lowerCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowerCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCamelCase ( self : Dict ) -> str: UpperCAmelCase_ = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ = RagRetriever.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ = retriever.retrieve(lowerCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCamelCase ( self : List[str] ) -> int: import torch UpperCAmelCase_ = 1 UpperCAmelCase_ = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase_ = [[5, 7], [10, 11]] UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ = retriever(lowerCAmelCase_ , lowerCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase_ ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , np.ndarray ) UpperCAmelCase_ = retriever( lowerCAmelCase_ , lowerCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase_ , return_tensors='''pt''' , ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = ( # noqa: F841 out['''context_input_ids'''], 
out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowerCAmelCase_ , torch.Tensor ) self.assertIsInstance(lowerCAmelCase_ , torch.Tensor ) self.assertIsInstance(lowerCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCamelCase ( self : Tuple ) -> int: UpperCAmelCase_ = self.get_dpr_ctx_encoder_tokenizer() UpperCAmelCase_ = 1 UpperCAmelCase_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(lowerCAmelCase_ ) UpperCAmelCase_ = [[5, 7], [10, 11]] UpperCAmelCase_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase_ = retriever(lowerCAmelCase_ , lowerCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase_ ) self.assertEqual( len(lowerCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowerCAmelCase_ ) # check for doc token related keys in dictionary.
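# Why the assertions above expect doc "1" for the all-ones query and doc "0"
# for the negated query: retrieval ranks by inner product against the stored
# embeddings (ones and 2 * ones). A self-contained numpy sketch of that
# ranking (names are ours):
def _demo_inner_product_ranking():
    import numpy as np

    dim = 8
    doc_embeds = np.stack([np.ones(dim), 2 * np.ones(dim)])  # docs "0" and "1"
    queries = np.stack([np.ones(dim), -np.ones(dim)])
    best = (queries @ doc_embeds.T).argmax(axis=1)
    assert best.tolist() == [1, 0]  # matches the doc_ids asserted in the tests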
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _lowerCamelCase : List[Any] = logging.getLogger(__name__) class snake_case__ ( __snake_case ): '''simple docstring''' def __init__( self : Tuple , lowerCAmelCase_ : Tuple=-1 ) -> List[Any]: # in NER datasets, the last column is usually reserved for NER label UpperCAmelCase_ = label_idx def UpperCamelCase ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[Split, str] ) -> List[InputExample]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ = mode.value UpperCAmelCase_ = os.path.join(lowerCAmelCase_ , F'''{mode}.txt''' ) UpperCAmelCase_ = 1 UpperCAmelCase_ = [] with open(lowerCAmelCase_ , encoding='''utf-8''' ) as f: UpperCAmelCase_ = [] UpperCAmelCase_ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) ) guid_index += 1 UpperCAmelCase_ = [] UpperCAmelCase_ = [] else: UpperCAmelCase_ = line.split(''' ''' ) words.append(splits[0] ) if len(lowerCAmelCase_ ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) ) return examples def UpperCamelCase ( self : Tuple , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : List ) -> str: UpperCAmelCase_ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(lowerCAmelCase_ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: UpperCAmelCase_ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(lowerCAmelCase_ ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase_ : str ) -> List[str]: if path: with open(lowerCAmelCase_ , '''r''' ) as f: UpperCAmelCase_ = f.read().splitlines() if "O" not in labels: UpperCAmelCase_ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class snake_case__ ( __snake_case ): '''simple docstring''' def __init__( self : str ) -> Dict: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase_ : str ) -> List[str]: if path: with open(lowerCAmelCase_ , '''r''' ) as f: UpperCAmelCase_ = f.read().splitlines() if "O" not in labels: UpperCAmelCase_ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class snake_case__ ( __snake_case ): '''simple docstring''' def UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[Split, str] ) -> List[InputExample]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ = mode.value UpperCAmelCase_ = os.path.join(lowerCAmelCase_ , F'''{mode}.txt''' ) UpperCAmelCase_ = 1 UpperCAmelCase_ = [] with open(lowerCAmelCase_ , encoding='''utf-8''' ) as f: for sentence in parse_incr(lowerCAmelCase_ ): 
UpperCAmelCase_ = [] UpperCAmelCase_ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) ) guid_index += 1 return examples def UpperCamelCase ( self : int , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : List ) -> List[str]: UpperCAmelCase_ = 0 for sentence in parse_incr(lowerCAmelCase_ ): UpperCAmelCase_ = preds_list[example_id] UpperCAmelCase_ = '''''' for token in sentence: out += F'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) ''' out += "\n" writer.write(lowerCAmelCase_ ) example_id += 1 def UpperCamelCase ( self : Dict , lowerCAmelCase_ : str ) -> List[str]: if path: with open(lowerCAmelCase_ , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
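# The NER reader above consumes one "token label" pair per line, with blank
# lines separating sentences. A self-contained parse of that format (inline
# data, no files; names are ours):
def _demo_conll_parse():
    conll = "EU B-ORG\nrejects O\n\nGerman B-MISC\n"
    sentences, words, labels = [], [], []
    for line in conll.splitlines(keepends=True):
        if line == "\n" or line == "":
            if words:
                sentences.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split(" ")
            words.append(splits[0])
            labels.append(splits[-1].replace("\n", ""))
    if words:
        sentences.append((words, labels))
    assert sentences == [(["EU", "rejects"], ["B-ORG", "O"]), (["German"], ["B-MISC"])]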
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { 'tanreinama/GPTSAN-2.8B-spout_is_uniform': ( 'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json' ), } class _A ( UpperCAmelCase_ ): lowercase_ : str = '''gptsan-japanese''' lowercase_ : int = [ '''past_key_values''', ] lowercase_ : str = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Dict , lowerCamelCase__ : Any=3_60_00 , lowerCamelCase__ : List[Any]=12_80 , lowerCamelCase__ : Tuple=10_24 , lowerCamelCase__ : Tuple=81_92 , lowerCamelCase__ : Optional[Any]=40_96 , lowerCamelCase__ : List[str]=1_28 , lowerCamelCase__ : int=10 , lowerCamelCase__ : int=0 , lowerCamelCase__ : List[str]=16 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=1_28 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Optional[Any]=1e-5 , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : Tuple="float32" , lowerCamelCase__ : int=False , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : int=False , lowerCamelCase__ : Optional[Any]=0.002 , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Optional[int]=3_59_98 , lowerCamelCase__ : Optional[Any]=3_59_95 , lowerCamelCase__ : List[Any]=3_59_99 , **lowerCamelCase__ : Tuple , ): """simple docstring""" __UpperCamelCase : str = vocab_size __UpperCamelCase : Optional[int] = max_position_embeddings __UpperCamelCase : List[Any] = d_model __UpperCamelCase : Tuple = d_ff __UpperCamelCase : Tuple = d_ext __UpperCamelCase : Tuple = d_spout __UpperCamelCase : Union[str, Any] = num_switch_layers __UpperCamelCase : str = num_ext_layers __UpperCamelCase : Dict = num_switch_layers + num_ext_layers __UpperCamelCase : Dict = num_heads __UpperCamelCase : List[Any] = num_experts __UpperCamelCase : Optional[int] = expert_capacity __UpperCamelCase : Dict = dropout_rate __UpperCamelCase : Any = layer_norm_epsilon __UpperCamelCase : int = router_bias __UpperCamelCase : List[str] = router_jitter_noise __UpperCamelCase : Optional[int] = router_dtype __UpperCamelCase : Any = router_ignore_padding_tokens __UpperCamelCase : Optional[Any] = output_hidden_states __UpperCamelCase : int = output_attentions __UpperCamelCase : Dict = initializer_factor __UpperCamelCase : List[str] = output_router_logits __UpperCamelCase : str = use_cache super().__init__( separator_token_id=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
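# For reference, the config above ships in transformers as
# GPTSanJapaneseConfig, and the attribute_map makes hidden_size an alias of
# d_model. A sketch, callable from user code (assuming a transformers build
# that includes GPTSAN; the helper name is ours):
def _demo_gptsan_config():
    from transformers import GPTSanJapaneseConfig

    cfg = GPTSanJapaneseConfig()
    assert cfg.hidden_size == cfg.d_model == 1024  # attribute_map alias
    assert cfg.num_layers == cfg.num_switch_layers + cfg.num_ext_layers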
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class _A ( UpperCAmelCase_ ): def __init__( self : Optional[Any] , lowerCamelCase__ : NestedDataStructureLike[PathLike] , lowerCamelCase__ : Optional[NamedSplit] = None , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Any , ): """simple docstring""" super().__init__( lowerCamelCase__ , split=lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , streaming=lowerCamelCase__ , num_proc=lowerCamelCase__ , **lowerCamelCase__ , ) __UpperCamelCase : Dict = path_or_paths if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else {self.split: path_or_paths} __UpperCamelCase : int = Text( cache_dir=lowerCamelCase__ , data_files=lowerCamelCase__ , features=lowerCamelCase__ , **lowerCamelCase__ , ) def a ( self : Optional[int] ): """simple docstring""" if self.streaming: __UpperCamelCase : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase : Any = None __UpperCamelCase : int = None __UpperCamelCase : int = None __UpperCamelCase : Optional[Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , num_proc=self.num_proc , ) __UpperCamelCase : Tuple = self.builder.as_dataset( split=self.split , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset
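# The reader above backs the packaged "text" builder in datasets; the usual
# entry point is load_dataset, which yields one {"text": line} example per
# line. A sketch with a throwaway file (names are ours):
def _demo_text_reader():
    import os
    import tempfile

    from datasets import load_dataset

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "corpus.txt")
        with open(path, "w") as f:
            f.write("hello\nworld\n")
        ds = load_dataset("text", data_files={"train": path}, split="train")
        assert len(ds) == 2 and ds[0] == {"text": "hello"}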
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCAmelCase ( __UpperCamelCase , unittest.TestCase ): __A : Tuple = CodeGenTokenizer __A : Dict = CodeGenTokenizerFast __A : List[Any] = True __A : Any = {'add_prefix_space': True} __A : Optional[Any] = False def UpperCAmelCase_ ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] lowerCAmelCase_ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) lowerCAmelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] lowerCAmelCase_ = {'unk_token': '<unk>'} lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowerCamelCase ) ) def UpperCAmelCase_ ( self , **_lowerCamelCase ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def UpperCAmelCase_ ( self , **_lowerCamelCase ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase ): lowerCAmelCase_ = 'lower newer' lowerCAmelCase_ = 'lower newer' return input_text, output_text def UpperCAmelCase_ ( self ): lowerCAmelCase_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase_ = 'lower newer' lowerCAmelCase_ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] lowerCAmelCase_ = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) lowerCAmelCase_ = tokens + [tokenizer.unk_token] lowerCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def UpperCAmelCase_ ( self ): if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase ) lowerCAmelCase_ = 'lower newer' # Testing tokenization lowerCAmelCase_ = tokenizer.tokenize(_lowerCamelCase , add_prefix_space=_lowerCamelCase ) lowerCAmelCase_ = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) # Testing conversion to ids without special tokens lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase ) lowerCAmelCase_ = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) # Testing conversion to ids with special tokens lowerCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=_lowerCamelCase ) lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase , 
add_prefix_space=_lowerCamelCase ) lowerCAmelCase_ = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) # Testing the unknown token lowerCAmelCase_ = tokens + [rust_tokenizer.unk_token] lowerCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ): # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCAmelCase_ ( self , _lowerCamelCase=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) # Simple input lowerCAmelCase_ = 'This is a simple input' lowerCAmelCase_ = ['This is a simple input 1', 'This is a simple input 2'] lowerCAmelCase_ = ('This is a simple input', 'This is a pair') lowerCAmelCase_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' ) # Simple input self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' ) # Simple input self.assertRaises( _lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' , ) # Pair input self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' ) # Pair input self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' ) # Pair input self.assertRaises( _lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding='''max_length''' , ) def UpperCAmelCase_ ( self ): lowerCAmelCase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input lowerCAmelCase_ = 'This is a simple input' lowerCAmelCase_ = ['This is a simple input looooooooong', 'This is a simple input'] lowerCAmelCase_ = ('This is a simple input', 'This is a pair') lowerCAmelCase_ = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] lowerCAmelCase_ = tokenizer.pad_token_id lowerCAmelCase_ = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) lowerCAmelCase_ = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors='''np''' ) lowerCAmelCase_ = tokenizer(*_lowerCamelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) lowerCAmelCase_ = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncate=_lowerCamelCase , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in 
out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def UpperCAmelCase_ ( self ): lowerCAmelCase_ = '$$$' lowerCAmelCase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_lowerCamelCase , add_bos_token=_lowerCamelCase ) lowerCAmelCase_ = 'This is a simple input' lowerCAmelCase_ = ['This is a simple input 1', 'This is a simple input 2'] lowerCAmelCase_ = tokenizer.bos_token_id lowerCAmelCase_ = tokenizer(_lowerCamelCase ) lowerCAmelCase_ = tokenizer(_lowerCamelCase ) self.assertEqual(out_s.input_ids[0] , _lowerCamelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowerCAmelCase_ = tokenizer.decode(out_s.input_ids ) lowerCAmelCase_ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _lowerCamelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCAmelCase_ ( self ): lowerCAmelCase_ = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) lowerCAmelCase_ = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' lowerCAmelCase_ = '\nif len_a > len_b: result = a\nelse: result = b' lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase ) lowerCAmelCase_ = ['^#', re.escape('''<|endoftext|>''' ), '^\'\'\'', '^"""', '\n\n\n'] lowerCAmelCase_ = tokenizer.decode(_lowerCamelCase , truncate_before_pattern=_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase_ ( self ): pass
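# What the slow test above checks, in plain terms: CodeGenTokenizer.decode
# accepts truncate_before_pattern, a list of regexes, and cuts the decoded
# text just before the first match, so generations stop at comments, triple
# quotes, or blank-line runs. A sketch (requires downloading the checkpoint,
# hence left as a callable helper with a name of our choosing):
def _demo_truncate_before_pattern():
    from transformers import CodeGenTokenizer

    tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    ids = tok.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
    # everything from the trailing comment marker onward is dropped
    return tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])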
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A_ : '''simple docstring''' def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ): __lowerCamelCase : List[str] = parent __lowerCamelCase : Optional[Any] = batch_size __lowerCamelCase : Optional[int] = num_channels __lowerCamelCase : str = image_size __lowerCamelCase : int = patch_size __lowerCamelCase : List[str] = is_training __lowerCamelCase : Dict = use_input_mask __lowerCamelCase : Any = use_token_type_ids __lowerCamelCase : List[str] = use_labels __lowerCamelCase : str = vocab_size __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Any = num_attention_heads __lowerCamelCase : List[Any] = intermediate_size __lowerCamelCase : List[Any] = hidden_act __lowerCamelCase : Any = hidden_dropout_prob __lowerCamelCase : Optional[int] = attention_probs_dropout_prob __lowerCamelCase : Dict = max_position_embeddings __lowerCamelCase : Tuple = type_vocab_size __lowerCamelCase : int = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range __lowerCamelCase : List[str] = coordinate_size __lowerCamelCase : int = shape_size __lowerCamelCase : Union[str, Any] = num_labels __lowerCamelCase : int = num_choices __lowerCamelCase : int = scope __lowerCamelCase : Any = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __lowerCamelCase : Any = text_seq_length __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1 __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length def _snake_case ( self: List[str] ): __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __lowerCamelCase : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCamelCase : List[str] = bbox[i, j, 3] 
__lowerCamelCase : str = bbox[i, j, 1] __lowerCamelCase : Dict = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCamelCase : Tuple = bbox[i, j, 2] __lowerCamelCase : Any = bbox[i, j, 0] __lowerCamelCase : List[str] = tmp_coordinate __lowerCamelCase : str = tf.constant(a ) __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Any = None if self.use_input_mask: __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] ) __lowerCamelCase : Tuple = None if self.use_token_type_ids: __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __lowerCamelCase : Dict = None __lowerCamelCase : Union[str, Any] = None if self.use_labels: __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __lowerCamelCase : Dict = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ): __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a ) # text + image __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a ) __lowerCamelCase : int = model( a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , ) __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __lowerCamelCase : List[Any] = model(a , training=a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ): __lowerCamelCase : List[str] = self.num_labels __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a ) __lowerCamelCase : int = model( a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ): __lowerCamelCase : Union[str, Any] = self.num_labels __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a ) __lowerCamelCase : Optional[Any] = model( a , bbox=a , pixel_values=a , 
attention_mask=a , token_type_ids=a , labels=a , training=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ): __lowerCamelCase : List[Any] = 2 __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a ) __lowerCamelCase : Any = model( a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self: List[Any] ): __lowerCamelCase : str = self.prepare_config_and_inputs() ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs __lowerCamelCase : Tuple = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) __snake_case = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) __snake_case = False __snake_case = False __snake_case = False def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ): return True def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ): __lowerCamelCase : List[str] = copy.deepcopy(a ) if model_class in get_values(a ): __lowerCamelCase : Tuple = { k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(a , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(a ): __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(a ): __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(a ): __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(a ): __lowerCamelCase : Dict = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def _snake_case ( self: Tuple ): __lowerCamelCase : int = TFLayoutLMvaModelTester(self ) __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 ) def _snake_case ( self: Union[str, Any] ): self.config_tester.run_common_tests() def _snake_case ( self: Union[str, Any] ): __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : int = model_class(a ) if getattr(a , 'hf_compute_loss' , a ): # The number of elements in the loss should be the same as the number of elements in the label __lowerCamelCase : Union[str, Any] = 
self._prepare_for_class(inputs_dict.copy() , a , return_labels=a ) __lowerCamelCase : int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0] ] __lowerCamelCase : Dict = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a ) __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' ) __lowerCamelCase : str = model(a , **a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a ) __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __lowerCamelCase : int = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __lowerCamelCase : Tuple = -100 __lowerCamelCase : Tuple = tf.convert_to_tensor(a ) __lowerCamelCase : Tuple = model(a , **a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a ) __lowerCamelCase : str = model(a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a ) # Get keys that were added with the _prepare_for_class function __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys() __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters __lowerCamelCase : List[str] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __lowerCamelCase : Optional[int] = {0: 'input_ids'} for label_key in label_keys: __lowerCamelCase : Dict = signature_names.index(a ) __lowerCamelCase : str = label_key __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __lowerCamelCase : Optional[int] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __lowerCamelCase : Optional[int] = prepared_for_class[value] __lowerCamelCase : Any = tuple(a ) # Send to model __lowerCamelCase : int = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def _snake_case ( self: List[str] ): ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(a , a , a , a , a , a ) def _snake_case ( self: int ): ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCamelCase : Union[str, Any] = type 
self.model_tester.create_and_check_model(a , a , a , a , a , a ) def _snake_case ( self: Dict ): ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( a , a , a , a , a , a , a ) def _snake_case ( self: str ): ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( a , a , a , a , a , a , a ) def _snake_case ( self: str ): ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( a , a , a , a , a , a , a ) @slow def _snake_case ( self: int ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a ) self.assertIsNotNone(a ) def UpperCamelCase__ ( ): __lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class A_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self: Optional[int] ): return LayoutLMvaImageProcessor(apply_ocr=a ) if is_vision_available() else None @slow def _snake_case ( self: Optional[Any] ): __lowerCamelCase : Tuple = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __lowerCamelCase : Union[str, Any] = self.default_image_processor __lowerCamelCase : List[Any] = prepare_img() __lowerCamelCase : str = image_processor(images=a , return_tensors='tf' ).pixel_values __lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] ) __lowerCamelCase : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __lowerCamelCase : int = model(input_ids=a , bbox=a , pixel_values=a , training=a ) # verify the logits __lowerCamelCase : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , a ) __lowerCamelCase : Any = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4 ) )
669
0
"""simple docstring""" def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : list[float] ): if discount_rate < 0: raise ValueError("""Discount rate cannot be negative""" ) if not cash_flows: raise ValueError("""Cash flows list cannot be empty""" ) __a : Tuple = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_lowerCamelCase ) ) return round(_lowerCamelCase , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
63
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class SCREAMING_SNAKE_CASE__ ( __snake_case ): def __init__(self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=0 ): '''simple docstring''' __a : Any = 1.0 if scale is None else scale __a : str = 0.0 if loc is None else loc super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] ) @property def lowerCAmelCase__(self ): '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def lowerCAmelCase__(self ): '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def lowerCAmelCase__(self ): '''simple docstring''' return self.variance.sqrt() class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self , _lowercase , _lowercase , _lowercase , **_lowercase ): '''simple docstring''' super().__init__(**_lowercase ) __a : str = args_dim __a : List[Any] = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] ) __a : Dict = domain_map def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' __a : List[Any] = [proj(_lowercase ) for proj in self.proj] return self.domain_map(*_lowercase ) class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__(self , _lowercase ): '''simple docstring''' super().__init__() __a : Optional[int] = function def lowerCAmelCase__(self , _lowercase , *_lowercase ): '''simple docstring''' return self.function(_lowercase , *_lowercase ) class SCREAMING_SNAKE_CASE__ : _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 def __init__(self , _lowercase = 1 ): '''simple docstring''' __a : Optional[int] = dim __a : str = {k: dim * self.args_dim[k] for k in self.args_dim} def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' if self.dim == 1: return self.distribution_class(*_lowercase ) else: return Independent(self.distribution_class(*_lowercase ) , 1 ) def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None , ): '''simple docstring''' __a : Tuple = self._base_distribution(_lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim ) @property def lowerCAmelCase__(self ): '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def lowerCAmelCase__(self ): '''simple docstring''' return len(self.event_shape ) @property def lowerCAmelCase__(self ): '''simple docstring''' return 0.0 def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' return ParameterProjection( in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowerCAmelCase__(self , *_lowercase ): '''simple docstring''' raise NotImplementedError() @staticmethod def lowerCAmelCase__(_lowercase ): '''simple docstring''' return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0 class SCREAMING_SNAKE_CASE__ ( __snake_case ): _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1} _lowerCAmelCase = StudentT @classmethod def lowerCAmelCase__(cls , _lowercase , _lowercase , _lowercase ): '''simple docstring''' __a : int = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __a : Optional[Any] = 2.0 + cls.squareplus(_lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class 
SCREAMING_SNAKE_CASE__ ( __snake_case ): _lowerCAmelCase = {"loc": 1, "scale": 1} _lowerCAmelCase = Normal @classmethod def lowerCAmelCase__(cls , _lowercase , _lowercase ): '''simple docstring''' __a : str = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class SCREAMING_SNAKE_CASE__ ( __snake_case ): _lowerCAmelCase = {"total_count": 1, "logits": 1} _lowerCAmelCase = NegativeBinomial @classmethod def lowerCAmelCase__(cls , _lowercase , _lowercase ): '''simple docstring''' __a : Union[str, Any] = cls.squareplus(_lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' __a , __a : Optional[Any] = distr_args if self.dim == 1: return self.distribution_class(total_count=_lowercase , logits=_lowercase ) else: return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 ) def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None ): '''simple docstring''' __a , __a : List[Any] = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
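# Every domain map above squeezes raw network outputs through the "squareplus" staticmethod
# to obtain strictly positive distribution parameters. A minimal standalone sketch of that
# transform (the free-function name `squareplus` is ours; the formula is copied verbatim
# from the staticmethod above):
import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # Smooth map from all reals to (0, inf); behaves like softplus but needs no exp/log.
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

# Sanity checks: squareplus(torch.tensor(0.0)) == 1.0, and squareplus(x) ~ x for large x > 0.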
63
1
'''simple docstring''' import collections import importlib.util import os import re from pathlib import Path A__ : Dict = """src/transformers""" # Matches is_xxx_available() A__ : Union[str, Any] = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} A__ : Tuple = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] A__ : str = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available A__ : Union[str, Any] = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") A__ : int = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] A__ : List[Any] = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", A__ : Tuple = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], A__ : List[Any] = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo A__ : Any = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: A__ : str = re.compile(R"""^\s*try:""") # Catches a line with else: A__ : List[str] = re.compile(R"""^\s*else:""") def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] ) -> List[str]: if _re_test_backend.search(UpperCAmelCase_ ) is None: return None __lowerCamelCase : str = [b[0] for b in _re_backend.findall(UpperCAmelCase_ )] backends.sort() return "_and_".join(UpperCAmelCase_ ) def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> Union[str, Any]: with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowerCamelCase : Union[str, Any] = f.readlines() __lowerCamelCase : Any = 0 while line_index < len(UpperCAmelCase_ ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCAmelCase_ ): return None # First grab the objects without a specific backend in _import_structure __lowerCamelCase : Dict = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: __lowerCamelCase : Optional[Any] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCAmelCase_ ): __lowerCamelCase : Optional[int] = _re_one_line_import_struct.search(UpperCAmelCase_ ).groups()[0] __lowerCamelCase : Union[str, Any] = re.findall('\[([^\]]+)\]' , UpperCAmelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue __lowerCamelCase : Tuple = _re_import_struct_key_value.search(UpperCAmelCase_ ) if single_line_import_search is not None: __lowerCamelCase : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(UpperCAmelCase_ ) > 0] objects.extend(UpperCAmelCase_ ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 __lowerCamelCase : Tuple = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowerCamelCase : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowerCamelCase : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowerCamelCase : Tuple = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): __lowerCamelCase : int = lines[line_index] if _re_import_struct_add_one.search(UpperCAmelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(UpperCAmelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCAmelCase_ ) is not None: __lowerCamelCase : int = _re_import_struct_add_many.search(UpperCAmelCase_ ).groups()[0].split(', ' ) __lowerCamelCase : Union[str, Any] = [obj[1:-1] for obj in imports if len(UpperCAmelCase_ ) > 0] objects.extend(UpperCAmelCase_ ) elif _re_between_brackets.search(UpperCAmelCase_ ) is not None: __lowerCamelCase : Dict = _re_between_brackets.search(UpperCAmelCase_ ).groups()[0].split(', ' ) __lowerCamelCase : Union[str, Any] = [obj[1:-1] for obj in imports if len(UpperCAmelCase_ ) > 0] objects.extend(UpperCAmelCase_ ) elif _re_quote_object.search(UpperCAmelCase_ ) is not None: objects.append(_re_quote_object.search(UpperCAmelCase_ ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 __lowerCamelCase : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowerCamelCase : Union[str, Any] = [] while ( line_index < len(UpperCAmelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): __lowerCamelCase : List[str] = lines[line_index] __lowerCamelCase : Dict = _re_import.search(UpperCAmelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowerCamelCase : str = {'none': objects} # Let's continue with backend-specific objects while line_index < len(UpperCAmelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowerCamelCase : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowerCamelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowerCamelCase : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): __lowerCamelCase : Optional[Any] = lines[line_index] __lowerCamelCase : Optional[Any] = _re_import.search(UpperCAmelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 __lowerCamelCase : Any = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] ) -> List[Any]: def find_duplicates(UpperCAmelCase_ : List[str] ): return [k for k, v in collections.Counter(UpperCAmelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowerCamelCase : Dict = [] for key in import_dict_objects.keys(): __lowerCamelCase : Any = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' ) __lowerCamelCase : Optional[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowerCamelCase : Tuple = 'base imports' if key == 'none' else F'{key} backend' errors.append(F'Differences for {name}:' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F' {a} in TYPE_HINT but not in _import_structure.' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F' {a} in _import_structure but not in TYPE_HINT.' ) return errors def UpperCAmelCase__ ( ) -> str: __lowerCamelCase : str = [] for root, _, files in os.walk(UpperCAmelCase_ ): if "__init__.py" in files: __lowerCamelCase : Dict = os.path.join(UpperCAmelCase_ , '__init__.py' ) __lowerCamelCase : int = parse_init(UpperCAmelCase_ ) if objects is not None: __lowerCamelCase : Optional[Any] = analyze_results(*UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: __lowerCamelCase : Optional[int] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}' failures.append('\n'.join(UpperCAmelCase_ ) ) if len(UpperCAmelCase_ ) > 0: raise ValueError('\n\n'.join(UpperCAmelCase_ ) ) def UpperCAmelCase__ ( ) -> Union[str, Any]: __lowerCamelCase : List[Any] = [] for path, directories, files in os.walk(UpperCAmelCase_ ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(UpperCAmelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCAmelCase_ ) / folder).glob('*.py' ) ) ) == 0: continue __lowerCamelCase : List[str] = str((Path(UpperCAmelCase_ ) / folder).relative_to(UpperCAmelCase_ ) ) __lowerCamelCase : int = short_path.replace(os.path.sep , '.' 
) submodules.append(UpperCAmelCase_ ) for fname in files: if fname == "__init__.py": continue __lowerCamelCase : int = str((Path(UpperCAmelCase_ ) / fname).relative_to(UpperCAmelCase_ ) ) __lowerCamelCase : int = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' ) ) == 1: submodules.append(UpperCAmelCase_ ) return submodules A__ : Optional[Any] = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def UpperCAmelCase__ ( ) -> List[Any]: # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : List[str] = importlib.util.spec_from_file_location( 'transformers' , os.path.join(UpperCAmelCase_ , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) __lowerCamelCase : Dict = spec.loader.load_module() __lowerCamelCase : str = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(UpperCAmelCase_ ) > 0: __lowerCamelCase : List[Any] = '\n'.join(F'- {module}' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F'{list_of_modules}\n' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
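# Hand-traced behaviour of the backend-detection logic above, using the names at the call
# sites (the module-level regex assignments in this row were all mangled to `A__`, so this
# describes how the helpers are used, not a runnable snippet):
#   find_backend('    if not is_torch_available():')                             -> 'torch'
#   find_backend('    if not is_tf_available() and not is_vision_available():') -> 'tf_and_vision'
# Every is_xxx_available() on the line is captured, the names are sorted, then joined with '_and_'.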
13
def lowerCAmelCase_ ( A_): if not all(char in "01" for char in bin_string): raise ValueError("Non-binary value was passed to the function") if not bin_string: raise ValueError("Empty string was passed to the function") UpperCamelCase__: List[Any] = "" while len(A_) % 3 != 0: UpperCamelCase__: int = "0" + bin_string UpperCamelCase__: Optional[int] = [ bin_string[index : index + 3] for index in range(len(A_)) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: UpperCamelCase__: Union[str, Any] = 0 for index, val in enumerate(A_): oct_val += int(2 ** (2 - index) * int(A_)) oct_string += str(A_) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
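# Worked example for the binary-to-octal converter above (hand-checked):
#   lowerCAmelCase_("101011") -> '53'
# The input is left-padded with '0' to a multiple of 3, split into 3-bit groups
# ('101', '011'), and each group becomes one octal digit: 0b101011 == 43 == 0o53.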
380
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _A : Tuple = logging.get_logger(__name__) _A : Optional[Any] = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : List[Any] = 'nat' _SCREAMING_SNAKE_CASE : Optional[Any] = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Any=64 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 4, 6, 5] , SCREAMING_SNAKE_CASE__ : Optional[Any]=[2, 4, 8, 16] , SCREAMING_SNAKE_CASE__ : Any=7 , SCREAMING_SNAKE_CASE__ : int=3.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : str , ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = embed_dim __lowerCAmelCase = depths __lowerCAmelCase = len(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = num_heads __lowerCAmelCase = kernel_size __lowerCAmelCase = mlp_ratio __lowerCAmelCase = qkv_bias __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = drop_path_rate __lowerCAmelCase = hidden_act __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowerCAmelCase = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE__ ) - 1) ) __lowerCAmelCase = layer_scale_init_value __lowerCAmelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 )] __lowerCAmelCase = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE__ , out_indices=SCREAMING_SNAKE_CASE__ , stage_names=self.stage_names )
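# Sanity check on the derived attribute above (hand-computed from the defaults): with
# embed_dim=64 and depths=[3, 4, 6, 5], hidden_size = int(64 * 2 ** (4 - 1)) = 512, i.e.
# the channel count after the last stage, which VisionEncoderDecoderModel reads.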
714
'''simple docstring''' import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class _lowercase ( unittest.TestCase ): '''simple docstring''' def a ( self : List[str] ) -> Optional[int]: __lowerCAmelCase = logging.get_logger() # the current default level is logging.WARNING __lowerCAmelCase = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) def a ( self : int ) -> str: __lowerCAmelCase = logging.get_verbosity() __lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __lowerCAmelCase = """Testing 1, 2, 3""" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: logger.warning(SCREAMING_SNAKE_CASE__ ) self.assertEqual(cl.out , msg + """\n""" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: logger.warning(SCREAMING_SNAKE_CASE__ ) self.assertEqual(cl.out , """""" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: logger.warning(SCREAMING_SNAKE_CASE__ ) self.assertEqual(cl.out , msg + """\n""" ) # restore to the original level logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) @mockenv(TRANSFORMERS_VERBOSITY="""error""" ) def a ( self : Optional[Any] ) -> List[Any]: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __lowerCAmelCase = os.getenv("""TRANSFORMERS_VERBOSITY""" , SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = logging.log_levels[env_level_str] __lowerCAmelCase = logging.get_verbosity() self.assertEqual( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level __lowerCAmelCase = """""" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" ) def a ( self : int ) -> List[Any]: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowerCAmelCase = logging.logging.getLogger() with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: # this action activates the env var logging.get_logger("""transformers.models.bart.tokenization_bart""" ) self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out ) # no need to restore as nothing was changed def a ( self : str ) -> Optional[Any]: # testing `logger.warning_advice()` 
transformers.utils.logging._reset_library_root_logger() __lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __lowerCAmelCase = """Testing 1, 2, 3""" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ): # nothing should be logged as env var disables this method with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: logger.warning_advice(SCREAMING_SNAKE_CASE__ ) self.assertEqual(cl.out , """""" ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: logger.warning_advice(SCREAMING_SNAKE_CASE__ ) self.assertEqual(cl.out , msg + """\n""" ) def UpperCamelCase_ ( ) -> List[str]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
330
0
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class _snake_case : def __init__( self : List[str], __lowercase : Optional[int], __lowercase : int = 13, __lowercase : int = 64, __lowercase : int = 2, __lowercase : int = 3, __lowercase : int = 3, __lowercase : bool = True, __lowercase : bool = True, __lowercase : int = 128, __lowercase : str=[16, 32, 64, 128], __lowercase : int = 7, __lowercase : int = 4, __lowercase : int = 37, __lowercase : str = "gelu", __lowercase : float = 0.1, __lowercase : float = 0.1, __lowercase : int = 10, __lowercase : float = 0.02, __lowercase : int = 2, __lowercase : int = 1, __lowercase : int = 128, __lowercase : List[int] = [2, 2, 2, 2], __lowercase : int = 2, __lowercase : int = 2, ): lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = encoder_stride lowercase__ = num_attention_outputs lowercase__ = embed_dim lowercase__ = embed_dim + 1 lowercase__ = resolution lowercase__ = depths lowercase__ = hidden_sizes lowercase__ = dim lowercase__ = mlp_expansion_ratio def A__ ( self : List[str] ): lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def A__ ( self : List[Any] ): return EfficientFormerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__lowercase, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, ) def A__ ( self : Union[str, Any], __lowercase : Optional[Any], __lowercase : List[str], __lowercase : Optional[Any] ): lowercase__ = TFEfficientFormerModel(config=__lowercase ) lowercase__ = model(__lowercase, training=__lowercase ) 
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self : Union[str, Any], __lowercase : List[str], __lowercase : str, __lowercase : List[str] ): lowercase__ = self.type_sequence_label_size lowercase__ = TFEfficientFormerForImageClassification(__lowercase ) lowercase__ = model(__lowercase, labels=__lowercase, training=__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase__ = 1 lowercase__ = TFEfficientFormerForImageClassification(__lowercase ) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(__lowercase, labels=__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def A__ ( self : Dict ): lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[Any] =( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCamelCase__ : Dict =( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCamelCase__ : List[str] =False UpperCamelCase__ : int =False UpperCamelCase__ : List[str] =False UpperCamelCase__ : Tuple =False UpperCamelCase__ : List[str] =False def A__ ( self : int ): lowercase__ = TFEfficientFormerModelTester(self ) lowercase__ = ConfigTester( self, config_class=__lowercase, has_text_modality=__lowercase, hidden_size=37 ) def A__ ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds" ) def A__ ( self : Optional[Any] ): pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings" ) def A__ ( self : Optional[int] ): pass def A__ ( self : Optional[Any] ): lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(__lowercase ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ["pixel_values"] self.assertListEqual(arg_names[:1], __lowercase ) def A__ ( self : int ): def check_hidden_states_output(__lowercase : Union[str, Any], __lowercase : Optional[int], __lowercase : Optional[Any] ): lowercase__ = model_class(__lowercase ) lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ), training=__lowercase ) lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__lowercase ), __lowercase ) if hasattr(self.model_tester, "encoder_seq_length" ): lowercase__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length" ) and self.model_tester.chunk_length > 1: lowercase__ = seq_length * self.model_tester.chunk_length else: lowercase__ = self.model_tester.seq_length self.assertListEqual( 
list(hidden_states[-1].shape[-2:] ), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: lowercase__ = outputs.decoder_hidden_states self.assertIsInstance(__lowercase, (list, tuple) ) self.assertEqual(len(__lowercase ), __lowercase ) lowercase__ = getattr(self.model_tester, "seq_length", __lowercase ) lowercase__ = getattr(self.model_tester, "decoder_seq_length", __lowercase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ), [decoder_seq_length, self.model_tester.hidden_size], ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = True check_hidden_states_output(__lowercase, __lowercase, __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(__lowercase, __lowercase, __lowercase ) def A__ ( self : Optional[Any], __lowercase : Any, __lowercase : List[str], __lowercase : Dict=False ): lowercase__ = super()._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def A__ ( self : int ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" ) def A__ ( self : Union[str, Any] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowercase ) def A__ ( self : Any ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) @slow def A__ ( self : int ): for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = TFEfficientFormerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def A__ ( self : Optional[int] ): lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = True lowercase__ = getattr(self.model_tester, "seq_length", __lowercase ) lowercase__ = getattr(self.model_tester, "encoder_seq_length", __lowercase ) lowercase__ = getattr(self.model_tester, "key_length", __lowercase ) lowercase__ = getattr(self.model_tester, "chunk_length", __lowercase ) if chunk_length is not None and hasattr(self.model_tester, "num_hashes" ): lowercase__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: lowercase__ = True lowercase__ = False lowercase__ = True lowercase__ = model_class(__lowercase ) lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ), training=__lowercase ) lowercase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__lowercase ), self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ = True lowercase__ = model_class(__lowercase ) lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ), training=__lowercase ) lowercase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__lowercase ), self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ), [self.model_tester.num_attention_heads, 
encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def A__ ( self : Optional[int] ): # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model lowercase__ = model_class(__lowercase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes lowercase__ = { key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=__lowercase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } lowercase__ = model(__lowercase ) self.assertTrue(outputs_dict is not None ) def __lowerCAmelCase ( ): lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _snake_case ( unittest.TestCase): @cached_property def A__ ( self : Dict ): return ( EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" ) if is_vision_available() else None ) @slow def A__ ( self : List[str] ): lowercase__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=__lowercase, return_tensors="tf" ) # forward pass lowercase__ = model(**__lowercase, training=__lowercase ) # verify the logits lowercase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape, __lowercase ) lowercase__ = tf.constant([-0.0555, 0.4825, -0.0852] ) self.assertTrue(np.allclose(outputs.logits[0, :3], __lowercase, atol=1e-4 ) ) @slow def A__ ( self : Optional[int] ): lowercase__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=__lowercase, return_tensors="tf" ) # forward pass lowercase__ = model(**__lowercase, training=__lowercase ) # verify the logits lowercase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape, __lowercase ) lowercase__ = tf.constant([-0.1312, 0.4353, -1.0499] ) self.assertTrue(np.allclose(outputs.logits[0, :3], __lowercase, atol=1e-4 ) )
413
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): def constraint_to_multiple_of(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=None ): lowercase__ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowercase__ = math.floor(val / multiple ) * multiple if x < min_val: lowercase__ = math.ceil(val / multiple ) * multiple return x lowercase__ = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else output_size lowercase__ , lowercase__ = get_image_size(SCREAMING_SNAKE_CASE_ ) lowercase__ , lowercase__ = output_size # determine new height and width lowercase__ = output_height / input_height lowercase__ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowercase__ = scale_width else: # fit height lowercase__ = scale_height lowercase__ = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE_ ) lowercase__ = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE_ ) return (new_height, new_width) class _snake_case ( lowercase__): UpperCamelCase__ : Tuple =["""pixel_values"""] def __init__( self : Any, __lowercase : bool = True, __lowercase : Dict[str, int] = None, __lowercase : PILImageResampling = PILImageResampling.BILINEAR, __lowercase : bool = False, __lowercase : int = 1, __lowercase : bool = True, __lowercase : Union[int, float] = 1 / 255, __lowercase : bool = True, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, **__lowercase : List[Any], ): super().__init__(**__lowercase ) lowercase__ = size if size is not None else {"height": 384, "width": 384} lowercase__ = get_size_dict(__lowercase ) lowercase__ = do_resize lowercase__ = size lowercase__ = keep_aspect_ratio lowercase__ = ensure_multiple_of lowercase__ = resample lowercase__ = do_rescale lowercase__ = rescale_factor lowercase__ = do_normalize lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def A__ ( self : List[Any], __lowercase : np.ndarray, __lowercase : Dict[str, int], __lowercase : bool = False, __lowercase : int = 1, __lowercase : PILImageResampling = PILImageResampling.BICUBIC, __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Union[str, Any], ): lowercase__ = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) lowercase__ = get_resize_output_image_size( __lowercase, output_size=(size["height"], size["width"]), keep_aspect_ratio=__lowercase, multiple=__lowercase, ) return resize(__lowercase, size=__lowercase, resample=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : str, __lowercase : np.ndarray, __lowercase : Union[int, float], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : List[Any], ): return rescale(__lowercase, scale=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : Any, __lowercase : np.ndarray, __lowercase : Union[float, List[float]], __lowercase : Union[float, List[float]], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Optional[Any], ): return normalize(__lowercase, mean=__lowercase, std=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : List[str], __lowercase : ImageInput, __lowercase : bool = None, __lowercase : int = None, __lowercase : bool = None, __lowercase : int = None, __lowercase : PILImageResampling = None, __lowercase : bool = None, __lowercase : float = None, __lowercase : bool = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[str, TensorType]] = None, __lowercase : ChannelDimension = ChannelDimension.FIRST, **__lowercase : Tuple, ): lowercase__ = do_resize if do_resize is not None else self.do_resize lowercase__ = size if size is not None else self.size lowercase__ = get_size_dict(__lowercase ) lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowercase__ = resample if resample is not None else self.resample lowercase__ = do_rescale if do_rescale is not None else self.do_rescale lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ = do_normalize if do_normalize is not None else self.do_normalize lowercase__ = image_mean if image_mean is not None else self.image_mean lowercase__ = image_std if image_std is not None else self.image_std lowercase__ = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
lowercase__ = [to_numpy_array(__lowercase ) for image in images] if do_resize: lowercase__ = [self.resize(image=__lowercase, size=__lowercase, resample=__lowercase ) for image in images] if do_rescale: lowercase__ = [self.rescale(image=__lowercase, scale=__lowercase ) for image in images] if do_normalize: lowercase__ = [self.normalize(image=__lowercase, mean=__lowercase, std=__lowercase ) for image in images] lowercase__ = [to_channel_dimension_format(__lowercase, __lowercase ) for image in images] lowercase__ = {"pixel_values": images} return BatchFeature(data=__lowercase, tensor_type=__lowercase ) def A__ ( self : int, __lowercase : Optional[Any], __lowercase : List[Tuple] = None ): lowercase__ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowercase ) != len(__lowercase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowercase ): lowercase__ = target_sizes.numpy() lowercase__ = [] for idx in range(len(__lowercase ) ): lowercase__ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="bilinear", align_corners=__lowercase ) lowercase__ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowercase ) else: lowercase__ = logits.argmax(dim=1 ) lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
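# Hand-traced example of the resize math above: for a 480x640 input, output size 384,
# keep_aspect_ratio=True and multiple=32, the scales are 384/480 = 0.8 (height) and
# 384/640 = 0.6 (width); 0.8 deviates least from 1, so both sides use it, giving
# constraint_to_multiple_of(0.8 * 480) x constraint_to_multiple_of(0.8 * 640) = (384, 512),
# both already multiples of 32, so no rounding is needed.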
413
1
import math import qiskit def UpperCamelCase__( UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 1 )->qiskit.result.counts.Counts: if ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) or isinstance(UpperCamelCase__ , UpperCamelCase__ ) or isinstance(UpperCamelCase__ , UpperCamelCase__ ) ): raise TypeError('''inputs must be integers.''' ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError('''inputs must be positive.''' ) if ( (math.floor(UpperCamelCase__ ) != input_a) or (math.floor(UpperCamelCase__ ) != input_a) or (math.floor(UpperCamelCase__ ) != carry_in) ): raise ValueError('''inputs must be exact integers.''' ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError('''inputs must be less or equal to 2.''' ) # build registers A__ = qiskit.QuantumRegister(4 , '''qr''' ) A__ = qiskit.ClassicalRegister(2 , '''cr''' ) # list the entries A__ = [input_a, input_a, carry_in] A__ = qiskit.QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(UpperCamelCase__ ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(UpperCamelCase__ ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(UpperCamelCase__ ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , UpperCamelCase__ ) # measure the last two qbits A__ = qiskit.Aer.get_backend('''aer_simulator''' ) A__ = qiskit.execute(UpperCamelCase__ , UpperCamelCase__ , shots=10_00 ) return job.result().get_counts(UpperCamelCase__ ) if __name__ == "__main__": print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
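# Reading the counts above (assuming Qiskit's usual classical-bit ordering): qubit 2, the
# sum, is measured into cr[0] and qubit 3, the carry, into cr[1], so the key '11' means
# sum=1, carry=1. For quantum_full_adder(1, 1, 1) every shot should therefore report '11',
# i.e. 1 + 1 + 1 = 0b11.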
212
def UpperCamelCase__( UpperCamelCase__ : int )->list: A__ = int(UpperCamelCase__ ) if n_element < 1: A__ = ValueError('''a should be a positive number''' ) raise my_error A__ = [1] A__ , A__ , A__ = (0, 0, 0) A__ = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": a__: str = input('Enter the last number (nth term) of the Hamming Number Series: ') print('Formula of Hamming Number Series => 2^i * 3^j * 5^k') a__: Union[str, Any] = hamming(int(n)) print('-----------------------------------------------------') print(F"The list with nth numbers is: {hamming_numbers}") print('-----------------------------------------------------')
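# Quick check of the generator above (the first ten 5-smooth numbers, hand-verified):
#   hamming(10) -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
# Each new element is min(2 * h[i], 3 * h[j], 5 * h[k]), with the three cursors advanced
# past any candidate that would not exceed the current last element.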
212
1
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def UpperCAmelCase ( a__ , a__ , a__ , a__=None , a__=None , a__=None , a__=None , a__=None , ): '''simple docstring''' if attention_mask is None: lowerCAmelCase :Dict = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowerCAmelCase :Any = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCAmelCase :int = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=a__ ) if decoder_head_mask is None: lowerCAmelCase :Any = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=a__ ) if cross_attn_head_mask is None: lowerCAmelCase :Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=a__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class __UpperCamelCase : def __init__( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=13 , UpperCAmelCase : Any=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Dict="relu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : Tuple=20 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : int=0 , ) -> Any: lowerCAmelCase :str = parent lowerCAmelCase :List[str] = batch_size lowerCAmelCase :Optional[Any] = seq_length lowerCAmelCase :Dict = is_training lowerCAmelCase :Any = use_labels lowerCAmelCase :Dict = vocab_size lowerCAmelCase :Union[str, Any] = hidden_size lowerCAmelCase :List[Any] = num_hidden_layers lowerCAmelCase :Dict = num_attention_heads lowerCAmelCase :Any = intermediate_size lowerCAmelCase :Union[str, Any] = hidden_act lowerCAmelCase :str = hidden_dropout_prob lowerCAmelCase :Optional[Any] = attention_probs_dropout_prob lowerCAmelCase :int = encoder_layerdrop lowerCAmelCase :List[str] = decoder_layerdrop lowerCAmelCase :List[str] = max_position_embeddings lowerCAmelCase :Dict = eos_token_id lowerCAmelCase :List[str] = pad_token_id lowerCAmelCase :Any = bos_token_id def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]: lowerCAmelCase :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase :Dict = self.eos_token_id # Eos Token lowerCAmelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because 
for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_length and which in turn results in # position_ids being off by num_pad_tokens in past input lowerCAmelCase :int = input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase :Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase :str = self.get_config() lowerCAmelCase :Tuple = prepare_mam_aaa_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def UpperCAmelCase__ ( self : str ) -> Tuple: return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: lowerCAmelCase , lowerCAmelCase :Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase__ ( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] ) -> Optional[int]: lowerCAmelCase :Optional[int] = MaMaaaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval() lowerCAmelCase :Optional[Any] = inputs_dict['input_ids'] lowerCAmelCase :Any = inputs_dict['attention_mask'] lowerCAmelCase :Any = inputs_dict['head_mask'] # first forward pass lowerCAmelCase :Any = model(UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , use_cache=UpperCAmelCase ) lowerCAmelCase , lowerCAmelCase :str = outputs.to_tuple() # create hypothetical multiple next tokens and extend to next_input_ids lowerCAmelCase :Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase :int = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and attention mask lowerCAmelCase :int = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase :str = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCAmelCase :List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )['last_hidden_state'] lowerCAmelCase :Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase )[ 'last_hidden_state' ] # select random slice lowerCAmelCase :List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase :int = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase :int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-2 ) ) def UpperCAmelCase__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Tuple: lowerCAmelCase :Optional[Any] = MaMaaaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval() lowerCAmelCase :str = model(**UpperCAmelCase ) 
lowerCAmelCase :Optional[int] = outputs.encoder_last_hidden_state lowerCAmelCase :Dict = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase :List[str] = model.get_encoder() encoder.save_pretrained(UpperCAmelCase ) lowerCAmelCase :Optional[int] = MaMaaaEncoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase ) lowerCAmelCase :List[Any] = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase :List[str] = model.get_decoder() decoder.save_pretrained(UpperCAmelCase ) lowerCAmelCase :Optional[int] = MaMaaaDecoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase ) lowerCAmelCase :Dict = decoder( input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class __UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): lowercase_ : str = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowercase_ : Tuple = (MaMaaaForConditionalGeneration,) if is_torch_available() else () lowercase_ : Dict = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowercase_ : Optional[int] = True lowercase_ : Union[str, Any] = True lowercase_ : str = False lowercase_ : Tuple = False def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ) -> List[Any]: if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def UpperCAmelCase__ ( self : List[str] ) -> Dict: lowerCAmelCase :Any = MaMaaaModelTester(self ) lowerCAmelCase :str = ConfigTester(self , config_class=UpperCAmelCase ) def UpperCAmelCase__ ( self : int ) -> int: self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ) -> Tuple: lowerCAmelCase , lowerCAmelCase :List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowerCAmelCase :Optional[int] = model_class(UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase ) lowerCAmelCase , lowerCAmelCase :Tuple = model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase ) self.assertEqual(info['missing_keys'] , [] ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: lowerCAmelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase ) def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase ) def UpperCAmelCase__ ( self : Dict ) -> int: lowerCAmelCase , lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): lowerCAmelCase :Tuple = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() lowerCAmelCase :List[str] = copy.deepcopy(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) if not self.is_encoder_decoder: lowerCAmelCase :int = inputs['input_ids'] del inputs["input_ids"] else: lowerCAmelCase :List[Any] = inputs['input_ids'] lowerCAmelCase :Optional[Any] = inputs.get('decoder_input_ids' , UpperCAmelCase ) del inputs["input_ids"] inputs.pop('decoder_input_ids' , UpperCAmelCase ) lowerCAmelCase :Any = model.get_input_embeddings() if not self.is_encoder_decoder: lowerCAmelCase :Union[str, Any] = wte(UpperCAmelCase ) else: lowerCAmelCase :Tuple = wte(UpperCAmelCase ) lowerCAmelCase :Union[str, Any] = wte(UpperCAmelCase ) with torch.no_grad(): model(**UpperCAmelCase )[0] def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]: lowerCAmelCase , lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs() lowerCAmelCase :Union[str, Any] = input_dict['input_ids'] lowerCAmelCase :int = input_ids.ne(1 ).to(UpperCAmelCase ) lowerCAmelCase :Dict = MaMaaaForConditionalGeneration(UpperCAmelCase ).eval().to(UpperCAmelCase ) if torch_device == "cuda": model.half() model.generate(UpperCAmelCase , attention_mask=UpperCAmelCase ) model.generate(num_beams=4 , do_sample=UpperCAmelCase , early_stopping=UpperCAmelCase , num_return_sequences=3 ) def UpperCAmelCase ( a__ ): '''simple docstring''' return torch.tensor(a__ , dtype=torch.long , device=a__ ) __SCREAMING_SNAKE_CASE = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class __UpperCamelCase ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Dict ) -> Dict: return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: lowerCAmelCase :Dict = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase ) lowerCAmelCase :Tuple = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) lowerCAmelCase :Union[str, Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) 
lowerCAmelCase :Tuple = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase ) with torch.no_grad(): lowerCAmelCase :Optional[int] = model(**UpperCAmelCase )[0] lowerCAmelCase :List[Any] = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , UpperCAmelCase ) # change to expected output here lowerCAmelCase :List[str] = torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=UpperCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]: lowerCAmelCase :Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase ) # change to intended input lowerCAmelCase :List[str] = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) lowerCAmelCase :List[Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) lowerCAmelCase :Dict = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase ) with torch.no_grad(): lowerCAmelCase :Optional[Any] = model(**UpperCAmelCase )[0] lowerCAmelCase :str = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase ) # change to expected output here lowerCAmelCase :List[Any] = torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=UpperCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: lowerCAmelCase :List[Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase ) lowerCAmelCase :Any = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' ) lowerCAmelCase :Union[str, Any] = [ 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent' ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de' ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.', ] # The below article tests that we don't add any hypotheses outside of the top n_beams lowerCAmelCase :int = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) lowerCAmelCase :Dict = model.generate( input_ids=dct['input_ids'].to(UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , ) lowerCAmelCase :Optional[Any] = [ 'The NSA case highlights the total absence of intelligence debate', 'I think there are two levels of response from the French government.', 'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.' ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all' ' communications in France.', ] lowerCAmelCase :int = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) assert generated == expected_en
553
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline __SCREAMING_SNAKE_CASE = { 'n_samples': 64, 'horizon': 32, 'num_inference_steps': 20, 'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network 'scale_grad_by_std': True, 'scale': 0.1, 'eta': 0.0, 't_grad_cutoff': 2, 'device': 'cpu', } if __name__ == "__main__": __SCREAMING_SNAKE_CASE = 'hopper-medium-v2' __SCREAMING_SNAKE_CASE = gym.make(env_name) __SCREAMING_SNAKE_CASE = ValueGuidedRLPipeline.from_pretrained( 'bglick13/hopper-medium-v2-value-function-hor32', env=env, ) env.seed(0) __SCREAMING_SNAKE_CASE = env.reset() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 10_00 __SCREAMING_SNAKE_CASE = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy __SCREAMING_SNAKE_CASE = pipeline(obs, planning_horizon=32) # execute action in environment __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = env.step(denorm_actions) __SCREAMING_SNAKE_CASE = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:""" F""" {total_score}""" ) # save observations for rendering rollout.append(next_observation.copy()) __SCREAMING_SNAKE_CASE = next_observation except KeyboardInterrupt: pass print(F"""Total reward: {total_reward}""")
553
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A_ : Optional[int] = { "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Dict = [ "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys A_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
701
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class lowerCamelCase (unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = ort.SessionOptions() SCREAMING_SNAKE_CASE__ = False return options def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: SCREAMING_SNAKE_CASE__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) SCREAMING_SNAKE_CASE__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) SCREAMING_SNAKE_CASE__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE__ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = """A red cat sitting on a park bench""" SCREAMING_SNAKE_CASE__ = np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__ = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=__UpperCAmelCase , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 1e-2
616
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_ : List[Any] = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ : str = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys SCREAMING_SNAKE_CASE_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
375
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Any = False, False, False @dataclass class snake_case_ : '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) __UpperCamelCase = field(default='''Audio''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ ) def __call__( self : Optional[Any] ) -> List[str]: '''simple docstring''' return self.pa_type def UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, bytes, dict] ) -> dict: '''simple docstring''' try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err if isinstance(__lowerCamelCase , __lowerCamelCase ): return {"bytes": None, "path": value} elif isinstance(__lowerCamelCase , __lowerCamelCase ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __lowercase = BytesIO() sf.write(__lowerCamelCase , value['array'] , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('pcm' ): # "PCM" only has raw audio bytes if value.get('sampling_rate' ) is None: # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' ) if value.get('bytes' ): # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!) __lowercase = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32_767 else: __lowercase = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32_767 __lowercase = BytesIO(bytes() ) sf.write(__lowerCamelCase , __lowerCamelCase , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def UpperCAmelCase ( self : Any , __lowerCamelCase : dict , __lowerCamelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: '''simple docstring''' if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' 
) __lowercase , __lowercase = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None) if path is None and file is None: raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err __lowercase = xsplitext(__lowerCamelCase )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( 'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ' 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( 'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ' 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) if file is None: __lowercase = token_per_repo_id or {} __lowercase = path.split('::' )[-1] try: __lowercase = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )['repo_id'] __lowercase = token_per_repo_id[repo_id] except (ValueError, KeyError): __lowercase = None with xopen(__lowerCamelCase , 'rb' , use_auth_token=__lowerCamelCase ) as f: __lowercase , __lowercase = sf.read(__lowerCamelCase ) else: __lowercase , __lowercase = sf.read(__lowerCamelCase ) __lowercase = array.T if self.mono: __lowercase = librosa.to_mono(__lowerCamelCase ) if self.sampling_rate and self.sampling_rate != sampling_rate: __lowercase = librosa.resample(__lowerCamelCase , orig_sr=__lowerCamelCase , target_sr=self.sampling_rate ) __lowercase = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCAmelCase ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value if self.decode: raise ValueError('Cannot flatten a decoded Audio feature.' 
) return { "bytes": Value('binary' ), "path": Value('string' ), } def UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type ): __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) __lowercase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) __lowercase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ): __lowercase = pa.array([Audio().encode_example(__lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: __lowercase = storage.field('bytes' ) else: __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: __lowercase = storage.field('path' ) else: __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) __lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) return array_cast(__lowerCamelCase , self.pa_type ) def UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : pa.StructArray ) -> pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(__lowerCamelCase : Any ): with xopen(__lowerCamelCase , 'rb' ) as f: __lowercase = f.read() return bytes_ __lowercase = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __lowercase = pa.array( [os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , ) __lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() ) return array_cast(__lowerCamelCase , self.pa_type )
375
1
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class snake_case__( UpperCAmelCase__ ): '''simple docstring''' def lowercase_ ( self , __lowercase ) -> float: return 0.0 def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> tuple[int | float, int | float]: lowerCAmelCase_ : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) lowerCAmelCase_ : str = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> None: lowerCAmelCase_ : List[Any] = 512 lowerCAmelCase_ : Any = [1] + [0] * (size - 1) lowerCAmelCase_ : Optional[int] = [filter_type.process(lowerCAmelCase_ ) for item in inputs] lowerCAmelCase_ : int = [0] * (samplerate - size) # zero-padding outputs += filler lowerCAmelCase_ : Tuple = np.abs(np.fft.fft(lowerCAmelCase_ ) ) lowerCAmelCase_ : Any = 20 * np.logaa(lowerCAmelCase_ ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds lowerCAmelCase_ : Dict = get_bounds(lowerCAmelCase_ , lowerCAmelCase_ ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(lowerCAmelCase_ ) plt.show() def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> None: lowerCAmelCase_ : Union[str, Any] = 512 lowerCAmelCase_ : Optional[Any] = [1] + [0] * (size - 1) lowerCAmelCase_ : Optional[Any] = [filter_type.process(lowerCAmelCase_ ) for item in inputs] lowerCAmelCase_ : Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler lowerCAmelCase_ : Dict = np.angle(np.fft.fft(lowerCAmelCase_ ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(lowerCAmelCase_ , -2 * pi ) ) plt.show()
708
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase__ ) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} ) SCREAMING_SNAKE_CASE__ : str = "audio" SCREAMING_SNAKE_CASE__ : str = "transcription" def lowercase_ ( self , __lowercase ) -> int: if self.audio_column not in features: raise ValueError(f"""Column {self.audio_column} is not present in features.""" ) if not isinstance(features[self.audio_column] , __lowercase ): raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" ) lowerCAmelCase_ : List[str] = copy.deepcopy(self ) lowerCAmelCase_ : Optional[Any] = self.input_schema.copy() lowerCAmelCase_ : Optional[Any] = features[self.audio_column] lowerCAmelCase_ : List[str] = input_schema return task_template @property def lowercase_ ( self ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
0
'''simple docstring''' from manim import * class snake_case ( lowercase_ ): """simple docstring""" def a__ ( self ) -> str: SCREAMING_SNAKE_CASE_ = Rectangle(height=0.5, width=0.5 ) SCREAMING_SNAKE_CASE_ = Rectangle(height=0.25, width=0.25 ) SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = VGroup(*_lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = VGroup(_lowercase, _lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = Text('CPU', font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_lowercase, _lowercase ).arrange(_lowercase, buff=0.5, aligned_edge=_lowercase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowercase ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE_ = VGroup(*_lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = Text('GPU', font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_lowercase, _lowercase ).arrange(_lowercase, buff=0.5, aligned_edge=_lowercase ) gpu.move_to([-1, -1, 0] ) self.add(_lowercase ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = Text('Model', font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_lowercase, _lowercase ).arrange(_lowercase, buff=0.5, aligned_edge=_lowercase ) model.move_to([3, -1.0, 0] ) self.add(_lowercase ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for i, rect in enumerate(_lowercase ): rect.set_stroke(_lowercase ) SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase, opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=_lowercase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0], direction=_lowercase, buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1], direction=_lowercase, buff=0.0 ) self.add(_lowercase ) model_cpu_arr.append(_lowercase ) self.add(*_lowercase, *_lowercase, *_lowercase ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = Text('Loaded Checkpoint', font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_lowercase, _lowercase ).arrange(_lowercase, buff=0.5, aligned_edge=_lowercase ) checkpoint.move_to([3, 0.5, 0] ) self.add(_lowercase ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for i, rect in enumerate(_lowercase ): SCREAMING_SNAKE_CASE_ = fill.copy().set_fill(_lowercase, opacity=0.7 ) target.move_to(_lowercase ) ckpt_arr.append(_lowercase ) SCREAMING_SNAKE_CASE_ = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(_lowercase ) self.add(*_lowercase, *_lowercase ) SCREAMING_SNAKE_CASE_ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE_ = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, ) key_text.move_to([-5, 2.4, 0] ) self.add(_lowercase, _lowercase ) SCREAMING_SNAKE_CASE_ = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=18, ) blue_text.next_to(_lowercase, DOWN * 2.4, aligned_edge=key_text.get_left() ) self.add(_lowercase ) SCREAMING_SNAKE_CASE_ = 
MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""", font_size=24, ) step_a.move_to([2, 2, 0] ) SCREAMING_SNAKE_CASE_ = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = VGroup(*_lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = VGroup(_lowercase, _lowercase ).arrange(_lowercase, buff=0 ) SCREAMING_SNAKE_CASE_ = Text('Disk', font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_lowercase, _lowercase ).arrange(_lowercase, buff=0.5, aligned_edge=_lowercase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(_lowercase, run_time=3 ), Write(_lowercase, run_time=1 ), Create(_lowercase, run_time=1 ) ) SCREAMING_SNAKE_CASE_ = [] for i, rect in enumerate(_lowercase ): SCREAMING_SNAKE_CASE_ = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(_lowercase, run_time=1.5 ) ) self.play(*_lowercase ) self.play(FadeOut(_lowercase ) ) SCREAMING_SNAKE_CASE_ = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowercase, run_time=3 ) ) self.play( FadeOut(_lowercase, _lowercase, *_lowercase, *_lowercase ), ) self.wait()
294
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SCREAMING_SNAKE_CASE : int = get_tests_dir("fixtures") SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/dummy_feature_extractor_config.json") SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/dummy-config.json") class snake_case ( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> str: SCREAMING_SNAKE_CASE_ = 0 def a__ ( self ) -> Tuple: SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(_lowercase, _lowercase ) def a__ ( self ) -> List[str]: SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase ) self.assertIsInstance(_lowercase, _lowercase ) def a__ ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE_ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase ).to_dict() config_dict.pop('feature_extractor_type' ) SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor(**_lowercase ) # save in new folder model_config.save_pretrained(_lowercase ) config.save_pretrained(_lowercase ) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase ) # make sure private variable is not incorrectly saved SCREAMING_SNAKE_CASE_ = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(_lowercase, _lowercase ) def a__ ( self ) -> int: SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase ) self.assertIsInstance(_lowercase, _lowercase ) def a__ ( self ) -> Any: with self.assertRaisesRegex( _lowercase, 'bert-base is not a local folder and is not a valid model identifier' ): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('bert-base' ) def a__ ( self ) -> List[Any]: with self.assertRaisesRegex( _lowercase, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase, revision='aaaaaa' ) def a__ ( self ) -> List[Any]: with self.assertRaisesRegex( _lowercase, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' ) def a__ ( self ) -> List[str]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_lowercase ): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_lowercase ): SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase ) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase ) self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_lowercase ) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase, trust_remote_code=_lowercase ) self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor' ) def a__ ( self ) -> int: try: AutoConfig.register('custom', _lowercase ) AutoFeatureExtractor.register(_lowercase, _lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowercase ): AutoFeatureExtractor.register(_lowercase, _lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_lowercase ) SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase ) self.assertIsInstance(_lowercase, _lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def a__ ( self ) -> str: class snake_case ( lowercase_ ): """simple docstring""" _a = True try: AutoConfig.register('custom', _lowercase ) AutoFeatureExtractor.register(_lowercase, _lowercase ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase ) self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase ) self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' ) self.assertTrue(not hasattr(_lowercase, 'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
294
1
'''simple docstring''' from __future__ import annotations def a ( UpperCamelCase_ : list , UpperCamelCase_ : int ) -> Optional[int]: # Checks if the entire collection has been sorted if len(UpperCamelCase_ ) <= 1 or n <= 1: return insert_next(UpperCamelCase_ , n - 1 ) rec_insertion_sort(UpperCamelCase_ , n - 1 ) def a ( UpperCamelCase_ : list , UpperCamelCase_ : int ) -> Optional[int]: # Checks order between adjacent elements if index >= len(UpperCamelCase_ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order snake_case__ , snake_case__ =( collection[index], collection[index - 1], ) insert_next(UpperCamelCase_ , index + 1 ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = input('''Enter integers separated by spaces: ''') SCREAMING_SNAKE_CASE__ : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
581
'''simple docstring''' import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def a ( UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[str]: def get_masked_lm_array(UpperCamelCase_ : str ): snake_case__ =f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ ) if "kernel" in name: snake_case__ =array.transpose() return torch.from_numpy(UpperCamelCase_ ) def get_encoder_array(UpperCamelCase_ : str ): snake_case__ =f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ ) if "kernel" in name: snake_case__ =array.transpose() return torch.from_numpy(UpperCamelCase_ ) def get_encoder_layer_array(UpperCamelCase_ : int , UpperCamelCase_ : str ): snake_case__ =f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ ) if "kernel" in name: snake_case__ =array.transpose() return torch.from_numpy(UpperCamelCase_ ) def get_encoder_attention_layer_array(UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : int ): snake_case__ =f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" snake_case__ =tf.train.load_variable(UpperCamelCase_ , UpperCamelCase_ ) snake_case__ =array.reshape(UpperCamelCase_ ) if "kernel" in name: snake_case__ =array.transpose() return torch.from_numpy(UpperCamelCase_ ) print(f"""Loading model based on config from {config_path}...""" ) snake_case__ =BertConfig.from_json_file(UpperCamelCase_ ) snake_case__ =BertForMaskedLM(UpperCamelCase_ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): snake_case__ =model.bert.encoder.layer[layer_index] # Self-attention snake_case__ =layer.attention.self snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_query_dense/kernel' , self_attn.query.weight.data.shape ) snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_query_dense/bias' , self_attn.query.bias.data.shape ) snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_key_dense/kernel' , self_attn.key.weight.data.shape ) snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_key_dense/bias' , self_attn.key.bias.data.shape ) snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_value_dense/kernel' , self_attn.value.weight.data.shape ) snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output snake_case__ =layer.attention.output snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_output_dense/kernel' , self_output.dense.weight.data.shape ) snake_case__ =get_encoder_attention_layer_array( UpperCamelCase_ , '_output_dense/bias' , self_output.dense.bias.data.shape ) snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_attention_layer_norm/gamma' ) snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_attention_layer_norm/beta' ) # Intermediate snake_case__ =layer.intermediate snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_intermediate_dense/kernel' ) snake_case__ =get_encoder_layer_array(UpperCamelCase_ , 
'_intermediate_dense/bias' ) # Output snake_case__ =layer.output snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_dense/kernel' ) snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_dense/bias' ) snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_layer_norm/gamma' ) snake_case__ =get_encoder_layer_array(UpperCamelCase_ , '_output_layer_norm/beta' ) # Embeddings snake_case__ =get_encoder_array('_position_embedding_layer/embeddings' ) snake_case__ =get_encoder_array('_type_embedding_layer/embeddings' ) snake_case__ =get_encoder_array('_embedding_norm_layer/gamma' ) snake_case__ =get_encoder_array('_embedding_norm_layer/beta' ) # LM Head snake_case__ =model.cls.predictions.transform snake_case__ =get_masked_lm_array('dense/kernel' ) snake_case__ =get_masked_lm_array('dense/bias' ) snake_case__ =get_masked_lm_array('layer_norm/gamma' ) snake_case__ =get_masked_lm_array('layer_norm/beta' ) snake_case__ =get_masked_lm_array('embedding_table' ) # Pooling snake_case__ =BertPooler(config=UpperCamelCase_ ) snake_case__ =get_encoder_array('_pooler_layer/kernel' ) snake_case__ =get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(UpperCamelCase_ ) # Integration test - should load without any errors ;) snake_case__ =BertForMaskedLM.from_pretrained(UpperCamelCase_ ) print(new_model.eval() ) print('Model conversion was done successfully!' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model.''', ) SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
581
1
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def UpperCamelCase (lowercase_: Optional[int] , lowercase_: Tuple=7 ) -> Dict: A__ : Optional[int] = None if token is not None: A__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""} # The id of a workflow (not of a workflow run) A__ : List[str] = """636036""" A__ : Optional[int] = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" A__ : List[str] = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() return result["workflow_runs"] def UpperCamelCase (lowercase_: Optional[Any] ) -> List[Any]: A__ : str = get_daily_ci_runs(SCREAMING_SNAKE_CASE_ ) A__ : Optional[Any] = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": A__ : List[str] = workflow_run["""id"""] break return workflow_run_id def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: List[Any] , lowercase_: Dict ) -> Dict: A__ : Optional[Any] = get_last_daily_ci_runs(SCREAMING_SNAKE_CASE_ ) if workflow_run_id is not None: A__ : Any = get_artifacts_links(worflow_run_id=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: A__ : List[str] = artifacts_links[artifact_name] download_artifact( artifact_name=SCREAMING_SNAKE_CASE_ , artifact_url=SCREAMING_SNAKE_CASE_ , output_dir=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) def UpperCamelCase (lowercase_: str , lowercase_: int , lowercase_: Tuple ) -> List[str]: get_last_daily_ci_artifacts(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A__ : Any = {} for artifact_name in artifact_names: A__ : int = os.path.join(SCREAMING_SNAKE_CASE_ , f"""{artifact_name}.zip""" ) if os.path.isfile(SCREAMING_SNAKE_CASE_ ): A__ : Any = {} with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file with z.open(SCREAMING_SNAKE_CASE_ ) as f: A__ : Any = f.read().decode("""UTF-8""" ) return results
456
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : str = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class _a (_lowerCamelCase): """simple docstring""" SCREAMING_SNAKE_CASE = 'sew' def __init__( self , A__=32 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__=2 , A__="gelu" , A__=0.1 , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.1 , A__=0.1 , A__=0.02 , A__=1E-5 , A__="group" , A__="gelu" , A__=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , A__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A__=False , A__=1_28 , A__=16 , A__=True , A__=0.05 , A__=10 , A__=2 , A__=0.0 , A__=10 , A__=0 , A__="mean" , A__=False , A__=False , A__=2_56 , A__=0 , A__=1 , A__=2 , **A__ , ) -> List[str]: super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ ) _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = feat_extract_norm _SCREAMING_SNAKE_CASE = feat_extract_activation _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = list(A__ ) _SCREAMING_SNAKE_CASE = conv_bias _SCREAMING_SNAKE_CASE = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE = len(self.conv_dim ) _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = squeeze_factor _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = hidden_dropout _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = activation_dropout _SCREAMING_SNAKE_CASE = feat_proj_dropout _SCREAMING_SNAKE_CASE = final_dropout _SCREAMING_SNAKE_CASE = layerdrop _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. """ """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """ F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) " F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE = apply_spec_augment _SCREAMING_SNAKE_CASE = mask_time_prob _SCREAMING_SNAKE_CASE = mask_time_length _SCREAMING_SNAKE_CASE = mask_time_min_masks _SCREAMING_SNAKE_CASE = mask_feature_prob _SCREAMING_SNAKE_CASE = mask_feature_length _SCREAMING_SNAKE_CASE = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE = ctc_loss_reduction _SCREAMING_SNAKE_CASE = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE = use_weighted_layer_sum _SCREAMING_SNAKE_CASE = classifier_proj_size @property def UpperCamelCase ( self ) -> Any: return functools.reduce(operator.mul , self.conv_stride , 1 )
591
0
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class UpperCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self , snake_case__ = 768 , ): """simple docstring""" super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.zeros(1 , _UpperCamelCase ) ) _SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.ones(1 , _UpperCamelCase ) ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ = None , snake_case__ = None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = nn.Parameter(self.mean.to(_UpperCamelCase ).to(_UpperCamelCase ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(self.std.to(_UpperCamelCase ).to(_UpperCamelCase ) ) return self def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = (embeds - self.mean) * 1.0 / self.std return embeds def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = (embeds * self.std) + self.mean return embeds
715
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase ( datasets.BuilderConfig ): A__ = 10000 A__ = None A__ = None class UpperCamelCase ( datasets.ArrowBasedBuilder ): A__ = ParquetConfig def __SCREAMING_SNAKE_CASE ( self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) _SCREAMING_SNAKE_CASE : List[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(snake_case__ , (str, list, tuple) ): _SCREAMING_SNAKE_CASE : int = data_files if isinstance(snake_case__ , snake_case__ ): _SCREAMING_SNAKE_CASE : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _SCREAMING_SNAKE_CASE : Optional[Any] = [dl_manager.iter_files(snake_case__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _SCREAMING_SNAKE_CASE : Tuple = [] for split_name, files in data_files.items(): if isinstance(snake_case__ , snake_case__ ): _SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _SCREAMING_SNAKE_CASE : Tuple = [dl_manager.iter_files(snake_case__ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(snake_case__ ): with open(snake_case__ , "rb" ) as f: _SCREAMING_SNAKE_CASE : Optional[int] = datasets.Features.from_arrow_schema(pq.read_schema(snake_case__ ) ) break splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={"files": files} ) ) return splits def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _SCREAMING_SNAKE_CASE : Union[str, Any] = table_cast(snake_case__ , self.info.features.arrow_schema ) return pa_table def __SCREAMING_SNAKE_CASE ( self , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ): with open(snake_case__ , "rb" ) as f: _SCREAMING_SNAKE_CASE : List[Any] = pq.ParquetFile(snake_case__ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): _SCREAMING_SNAKE_CASE : int = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield 
F'''{file_idx}_{batch_idx}''', self._cast_table(snake_case__ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(snake_case__ )}: {e}''' ) raise
295
0
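A minimal round-trip check of the embedding normalizer restored above. The import path and the `scale`/`unscale` method names follow the restored class and are assumptions about where this file lives inside diffusers; with a freshly initialized mean of zeros and std of ones, scaling is the identity, so the round trip must hold.

import torch

from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import (
    StableUnCLIPImageNormalizer,  # assumed import path for the class above
)

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(4, 768)

scaled = normalizer.scale(embeds)
restored = normalizer.unscale(scaled)

# unscale(scale(x)) should reproduce x up to floating-point error
assert torch.allclose(restored, embeds, atol=1e-5)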
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method,
    given sample points (x_points[i], y_points[i]).

    >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 99)[0]
    104.0
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
86
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = (DDPMParallelScheduler,) def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ): A_ = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCAmelCase ) return config def __A ( self : Optional[Any] ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def __A ( self : Dict ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase ) def __A ( self : int ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase ) def __A ( self : Tuple ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase ) def __A ( self : int ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase ) def __A ( self : Union[str, Any] ): self.check_over_configs(thresholding=UpperCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , ) def __A ( self : Optional[int] ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def __A ( self : Tuple ): for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = self.dummy_sample_deter + 0.1 A_ = self.dummy_sample_deter - 0.1 A_ = samplea.shape[0] A_ = torch.stack([samplea, samplea, samplea] , dim=0 ) A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase ) A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2 assert abs(result_mean.item() - 0.5_005 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def __A ( self : Tuple ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config(prediction_type="v_prediction" ) A_ = scheduler_class(**UpperCAmelCase ) A_ = len(UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter A_ = torch.manual_seed(0 ) for t in reversed(range(UpperCAmelCase ) ): # 1. predict noise residual A_ = model(UpperCAmelCase , UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample A_ = pred_prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def __A ( self : Union[str, Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase ) A_ = scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase ): if i == len(UpperCAmelCase ) - 1: A_ = -1 else: A_ = timesteps[i + 1] A_ = scheduler.previous_timestep(UpperCAmelCase ) A_ = prev_t.item() self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase , msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [100, 87, 50, 1, 0] A_ = len(UpperCAmelCase ) with self.assertRaises(UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) A_ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase )
86
1
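A usage sketch for neville_interpolate above. The sample points sit on the line y = x + 5, so the interpolating polynomial through them collapses to that line and any query point x0 should evaluate to x0 + 5; the second return value is the full Neville tableau.

value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 3.5)
print(value)       # 8.5
print(len(table))  # 5 -- one tableau row per sample point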
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Tuple: lowercase : List[str] =384 if "tiny" in model_name: lowercase : List[str] =[3, 3, 9, 3] lowercase : Dict =[96, 192, 384, 768] if "small" in model_name: lowercase : Optional[int] =[3, 3, 27, 3] lowercase : Dict =[96, 192, 384, 768] if "base" in model_name: lowercase : Optional[Any] =[3, 3, 27, 3] lowercase : Optional[int] =[128, 256, 512, 1024] lowercase : Tuple =512 if "large" in model_name: lowercase : Dict =[3, 3, 27, 3] lowercase : str =[192, 384, 768, 1536] lowercase : Optional[Any] =768 if "xlarge" in model_name: lowercase : str =[3, 3, 27, 3] lowercase : List[Any] =[256, 512, 1024, 2048] lowercase : Any =1024 # set label information lowercase : str =150 lowercase : Any ='''huggingface/label-files''' lowercase : Tuple ='''ade20k-id2label.json''' lowercase : Union[str, Any] =json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) ) lowercase : Dict ={int(UpperCamelCase__ ): v for k, v in idalabel.items()} lowercase : Any ={v: k for k, v in idalabel.items()} lowercase : Tuple =ConvNextConfig( depths=UpperCamelCase__ , hidden_sizes=UpperCamelCase__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) lowercase : Optional[int] =UperNetConfig( backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , ) return config def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Tuple: lowercase : List[str] =[] # fmt: off # stem rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') ) rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', 
f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> str: lowercase : Union[str, Any] =dct.pop(UpperCamelCase__ ) lowercase : Optional[int] =val def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[Any] ) -> List[Any]: lowercase : str ={ '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''', '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''', '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''', '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''', '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''', } lowercase : Optional[Any] =model_name_to_url[model_name] lowercase : Tuple =torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' )['''state_dict'''] lowercase : Optional[Any] =get_upernet_config(UpperCamelCase__ ) lowercase : int =UperNetForSemanticSegmentation(UpperCamelCase__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowercase : Dict =state_dict.pop(UpperCamelCase__ ) if "bn" in key: lowercase : Any =key.replace('''bn''' , '''batch_norm''' ) lowercase : Optional[int] =val # rename keys lowercase : List[str] =create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify on image lowercase : Optional[int] ='''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowercase : 
Tuple =Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) lowercase : List[Any] =SegformerImageProcessor() lowercase : Dict =processor(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowercase : List[str] =model(UpperCamelCase__ ) if model_name == "upernet-convnext-tiny": lowercase : int =torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ) elif model_name == "upernet-convnext-small": lowercase : List[Any] =torch.tensor( [[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] ) elif model_name == "upernet-convnext-base": lowercase : Union[str, Any] =torch.tensor( [[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] ) elif model_name == "upernet-convnext-large": lowercase : Union[str, Any] =torch.tensor( [[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] ) elif model_name == "upernet-convnext-xlarge": lowercase : Optional[int] =torch.tensor( [[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-convnext-tiny""", type=str, choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]], help="""Name of the ConvNext UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCamelCase_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
702
'''simple docstring'''
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring case and spaces).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count for the corresponding character
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
88
0
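A couple of representative calls to check_anagrams above, beyond the doctests: case and spaces are ignored, so only the multiset of letters matters.

print(check_anagrams("Dormitory", "Dirty room"))  # True
print(check_anagrams("Hello", "World"))           # False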
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
27
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Build sliding windows: each sample is `look_back` steps of history and
    # `forward_days` steps of targets immediately after it
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
559
0
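A short usage sketch for the ViltProcessor restored above. The checkpoint name is a public ViLT checkpoint and the image URL is the usual COCO example; both are assumptions here, as is having network access to download them.

import requests
from PIL import Image

from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")  # assumed checkpoint

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One batch of model inputs: input_ids/attention_mask from the tokenizer plus
# pixel_values/pixel_mask from the image processor
inputs = processor(image, "How many cats are there?", return_tensors="pt")
print(sorted(inputs.keys()))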
"""simple docstring""" def lowercase_ ( _snake_case = 3 ,_snake_case = 7 ,_snake_case = 1_000_000 ): SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : Tuple = 1 for current_denominator in range(1 ,limit + 1 ): SCREAMING_SNAKE_CASE__ : Dict = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: SCREAMING_SNAKE_CASE__ : Dict = current_numerator SCREAMING_SNAKE_CASE__ : int = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
711
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa UpperCAmelCase__ : Optional[int] = logging.getLogger(__name__) class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = '''summarization''' __UpperCamelCase : int = ['''loss'''] __UpperCamelCase : Dict = ROUGE_KEYS __UpperCamelCase : Any = '''rouge2''' def __init__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , mode=self.mode , **SCREAMING_SNAKE_CASE__ ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) SCREAMING_SNAKE_CASE__ : str = Path(self.output_dir ) / """metrics.json""" SCREAMING_SNAKE_CASE__ : Any = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = self.config.model_type SCREAMING_SNAKE_CASE__ : Dict = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size SCREAMING_SNAKE_CASE__ : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } SCREAMING_SNAKE_CASE__ : Optional[Any] = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } SCREAMING_SNAKE_CASE__ : Optional[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} SCREAMING_SNAKE_CASE__ : Any = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) SCREAMING_SNAKE_CASE__ : Tuple = get_git_info()["""repo_sha"""] 
SCREAMING_SNAKE_CASE__ : List[str] = hparams.num_workers SCREAMING_SNAKE_CASE__ : Optional[int] = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang] SCREAMING_SNAKE_CASE__ : List[Any] = self.decoder_start_token_id SCREAMING_SNAKE_CASE__ : List[str] = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset ) SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = self.hparams.eval_max_gen_length else: SCREAMING_SNAKE_CASE__ : List[str] = self.model.config.max_length SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict[str, List[str]]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(SCREAMING_SNAKE_CASE__ , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) SCREAMING_SNAKE_CASE__ : str = True return readable_batch def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" return self.model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.batch_decode( SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return lmap(str.strip , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch["""input_ids"""], batch["""attention_mask"""] SCREAMING_SNAKE_CASE__ : Dict = batch["""labels"""] if isinstance(self.model , SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : int = self.model._shift_right(SCREAMING_SNAKE_CASE__ ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero SCREAMING_SNAKE_CASE__ : List[Any] = decoder_input_ids self.save_readable_batch(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = self(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id SCREAMING_SNAKE_CASE__ : Dict = nn.CrossEntropyLoss(ignore_index=SCREAMING_SNAKE_CASE__ ) assert lm_logits.shape[-1] == self.vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: SCREAMING_SNAKE_CASE__ : int = nn.functional.log_softmax(SCREAMING_SNAKE_CASE__ , dim=-1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = 
label_smoothed_nll_loss( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.hparams.label_smoothing , ignore_index=SCREAMING_SNAKE_CASE__ ) return (loss,) @property def __magic_name__ (self ) -> int: """simple docstring""" return self.tokenizer.pad_token_id def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self._step(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = dict(zip(self.loss_names , SCREAMING_SNAKE_CASE__ ) ) # tokens per batch SCREAMING_SNAKE_CASE__ : List[Any] = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() SCREAMING_SNAKE_CASE__ : int = batch["""input_ids"""].shape[0] SCREAMING_SNAKE_CASE__ : int = batch["""input_ids"""].eq(self.pad ).sum() SCREAMING_SNAKE_CASE__ : int = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" return self._generative_step(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="val" ) -> Dict: """simple docstring""" self.step_count += 1 SCREAMING_SNAKE_CASE__ : Optional[int] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} SCREAMING_SNAKE_CASE__ : Any = losses["""loss"""] SCREAMING_SNAKE_CASE__ : Dict = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } SCREAMING_SNAKE_CASE__ : str = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) SCREAMING_SNAKE_CASE__ : torch.FloatTensor = torch.tensor(SCREAMING_SNAKE_CASE__ ).type_as(SCREAMING_SNAKE_CASE__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} SCREAMING_SNAKE_CASE__ : List[Any] = self.step_count self.metrics[prefix].append(SCREAMING_SNAKE_CASE__ ) # callback writes this to self.metrics_save_path SCREAMING_SNAKE_CASE__ : Optional[Any] = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" return calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') SCREAMING_SNAKE_CASE__ : int = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) SCREAMING_SNAKE_CASE__ : Optional[int] = (time.time() - ta) / batch["""input_ids"""].shape[0] SCREAMING_SNAKE_CASE__ : List[str] = self.ids_to_clean_text(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = self.ids_to_clean_text(batch["""labels"""] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._step(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(self.loss_names , SCREAMING_SNAKE_CASE__ ) ) 
SCREAMING_SNAKE_CASE__ : Dict = self.calc_generative_metrics(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = np.mean(lmap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) base_metrics.update(gen_time=SCREAMING_SNAKE_CASE__ , gen_len=SCREAMING_SNAKE_CASE__ , preds=SCREAMING_SNAKE_CASE__ , target=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) return base_metrics def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" return self._generative_step(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return self.validation_epoch_end(SCREAMING_SNAKE_CASE__ , prefix="""test""" ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> SeqaSeqDataset: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.n_obs[type_path] SCREAMING_SNAKE_CASE__ : Optional[Any] = self.target_lens[type_path] SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dataset_class( self.tokenizer , type_path=SCREAMING_SNAKE_CASE__ , n_obs=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , **self.dataset_kwargs , ) return dataset def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> DataLoader: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.get_dataset(SCREAMING_SNAKE_CASE__ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": SCREAMING_SNAKE_CASE__ : Tuple = dataset.make_sortish_sampler(SCREAMING_SNAKE_CASE__ , distributed=self.hparams.gpus > 1 ) return DataLoader( SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE__ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE__ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": SCREAMING_SNAKE_CASE__ : int = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( SCREAMING_SNAKE_CASE__ , batch_sampler=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=dataset.collate_fn , shuffle=SCREAMING_SNAKE_CASE__ , num_workers=self.num_workers , sampler=SCREAMING_SNAKE_CASE__ , ) def __magic_name__ (self ) -> DataLoader: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=SCREAMING_SNAKE_CASE__ ) return dataloader def __magic_name__ (self ) -> DataLoader: """simple docstring""" return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def __magic_name__ (self ) -> DataLoader: """simple docstring""" return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) add_generic_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) parser.add_argument( """--max_source_length""" , default=10_24 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=56 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=1_42 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=1_42 , type=SCREAMING_SNAKE_CASE__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--max_tokens_per_batch""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--logger_name""" , type=SCREAMING_SNAKE_CASE__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=SCREAMING_SNAKE_CASE__ , default=5_00 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=SCREAMING_SNAKE_CASE__ , default="""summarization""" , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=SCREAMING_SNAKE_CASE__ , default=0.0 , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--src_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--tgt_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument("""--eval_beams""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ ) parser.add_argument( """--val_metric""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=SCREAMING_SNAKE_CASE__ , default=1 , required=SCREAMING_SNAKE_CASE__ , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So""" """ val_check_interval will effect it.""" ) , ) return parser class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Union[str, Any] = '''translation''' __UpperCamelCase : Optional[Any] = ['''loss'''] __UpperCamelCase : Optional[int] = ['''bleu'''] __UpperCamelCase : Tuple = '''bleu''' def __init__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = hparams.src_lang SCREAMING_SNAKE_CASE__ : int = hparams.tgt_lang def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> dict: """simple docstring""" return calculate_bleu(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowercase_ ( _snake_case ,_snake_case=None ): Path(args.output_dir ).mkdir(exist_ok=_snake_case ) check_output_dir(_snake_case ,expected_items=3 ) if model is None: if "summarization" in args.task: SCREAMING_SNAKE_CASE__ : SummarizationModule = SummarizationModule(_snake_case ) else: SCREAMING_SNAKE_CASE__ : SummarizationModule = TranslationModule(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): SCREAMING_SNAKE_CASE__ : List[Any] = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger SCREAMING_SNAKE_CASE__ : Optional[int] = os.environ.get("""WANDB_PROJECT""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = WandbLogger(name=model.output_dir.name ,project=_snake_case ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger SCREAMING_SNAKE_CASE__ : Tuple = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: SCREAMING_SNAKE_CASE__ : List[str] = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience ) else: SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Any = args.val_metric == """loss""" SCREAMING_SNAKE_CASE__ : pl.Trainer = generic_train( _snake_case ,_snake_case ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback( args.output_dir ,model.val_metric ,args.save_top_k ,_snake_case ) ,early_stopping_callback=_snake_case ,logger=_snake_case ,) pickle_save(model.hparams ,model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model SCREAMING_SNAKE_CASE__ : List[str] = """""" SCREAMING_SNAKE_CASE__ : List[Any] = sorted(glob.glob(os.path.join(args.output_dir ,"""*.ckpt""" ) ,recursive=_snake_case ) ) if checkpoints: SCREAMING_SNAKE_CASE__ : Optional[int] = checkpoints[-1] SCREAMING_SNAKE_CASE__ : Any = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() UpperCAmelCase__ : Optional[int] = pl.Trainer.add_argparse_args(parser) UpperCAmelCase__ : List[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) UpperCAmelCase__ : Any = parser.parse_args() main(args)
545
0
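A quick check of solution() above. A small instance is easy to verify by hand: with denominators up to 8, the fraction just left of 3/7 is 2/5, so the numerator is 2; the full Project Euler #71 instance should return 428570 (from 428570/999997).

assert solution(numerator=3, denominator=7, limit=8) == 2
assert solution(numerator=3, denominator=7, limit=1_000_000) == 428570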
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
498
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def __lowerCamelCase ( a_ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE :Optional[int] = os.path.join(args.tf_model_dir , '''parameters.json''' ) __SCREAMING_SNAKE_CASE :Dict = json.loads(open(a_ ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): __SCREAMING_SNAKE_CASE :Tuple = args.output + '''.pt''' __SCREAMING_SNAKE_CASE :Union[str, Any] = OrderedDict() with tf.device('''/CPU:0''' ): __SCREAMING_SNAKE_CASE :Optional[int] = tf.train.load_checkpoint(args.tf_model_dir ) __SCREAMING_SNAKE_CASE :Tuple = reader.get_variable_to_shape_map() for key_name in shapes.keys(): __SCREAMING_SNAKE_CASE :str = reader.get_tensor(a_ ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): __SCREAMING_SNAKE_CASE :Optional[Any] = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): __SCREAMING_SNAKE_CASE :List[str] = 8 __SCREAMING_SNAKE_CASE :List[str] = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time __SCREAMING_SNAKE_CASE :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Any = torch.tensor(a_ ) elif key_name.startswith('''model/moe''' ): __SCREAMING_SNAKE_CASE :List[Any] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): __SCREAMING_SNAKE_CASE :Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player __SCREAMING_SNAKE_CASE :str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Optional[int] = torch.tensor(a_ ) elif key_name.endswith('''/softmlp/kernel''' ): __SCREAMING_SNAKE_CASE :Tuple = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player __SCREAMING_SNAKE_CASE :Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :List[Any] = torch.tensor(a_ ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): __SCREAMING_SNAKE_CASE :Optional[Any] = key_name[-9:-7] for i in range(16 ): __SCREAMING_SNAKE_CASE :int = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) __SCREAMING_SNAKE_CASE :List[Any] = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided __SCREAMING_SNAKE_CASE :Optional[int] = torch.tensor(a_ ) elif key_name.startswith('''model/mlp''' ): __SCREAMING_SNAKE_CASE :Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): __SCREAMING_SNAKE_CASE :Optional[Any] = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player __SCREAMING_SNAKE_CASE :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :str = torch.tensor(a_ ) elif key_name.endswith('''/p1/bias''' ): __SCREAMING_SNAKE_CASE :List[Any] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player __SCREAMING_SNAKE_CASE :List[str] = vnp.copy() # same because it is one dimensional __SCREAMING_SNAKE_CASE :int = torch.tensor(a_ ) elif key_name.endswith('''/p2/kernel''' ): __SCREAMING_SNAKE_CASE :Optional[Any] = 
'''model.blocks.%d.feed_forward.mlp.wo.weight''' % player __SCREAMING_SNAKE_CASE :Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Dict = torch.tensor(a_ ) elif key_name.endswith('''/p2/bias''' ): __SCREAMING_SNAKE_CASE :Dict = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player __SCREAMING_SNAKE_CASE :Optional[int] = vnp.copy() # same because it is one dimensional __SCREAMING_SNAKE_CASE :int = torch.tensor(a_ ) elif key_name.startswith('''model/ln''' ): __SCREAMING_SNAKE_CASE :Tuple = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): __SCREAMING_SNAKE_CASE :Optional[Any] = '''model.blocks.%d.feed_forward.norm.bias''' % player __SCREAMING_SNAKE_CASE :Dict = vnp.copy() # same because it is one dimensional __SCREAMING_SNAKE_CASE :List[str] = torch.tensor(a_ ) elif key_name.endswith('''/g''' ): __SCREAMING_SNAKE_CASE :Any = '''model.blocks.%d.feed_forward.norm.weight''' % player __SCREAMING_SNAKE_CASE :List[Any] = vnp.copy() # same because it is one dimensional __SCREAMING_SNAKE_CASE :Tuple = torch.tensor(a_ ) elif key_name.startswith('''model/att''' ): __SCREAMING_SNAKE_CASE :Tuple = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): __SCREAMING_SNAKE_CASE :Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum __SCREAMING_SNAKE_CASE :Union[str, Any] = state[:, 0, :, :] __SCREAMING_SNAKE_CASE :Dict = state[:, 1, :, :] __SCREAMING_SNAKE_CASE :Union[str, Any] = state[:, 2, :, :] __SCREAMING_SNAKE_CASE :Optional[Any] = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Union[str, Any] = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Tuple = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Any = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player __SCREAMING_SNAKE_CASE :List[Any] = torch.tensor(a_ ) __SCREAMING_SNAKE_CASE :int = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player __SCREAMING_SNAKE_CASE :str = torch.tensor(a_ ) __SCREAMING_SNAKE_CASE :int = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player __SCREAMING_SNAKE_CASE :str = torch.tensor(a_ ) elif key_name.endswith('''/o/kernel''' ): __SCREAMING_SNAKE_CASE :Union[str, Any] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player __SCREAMING_SNAKE_CASE :Any = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Union[str, Any] = torch.tensor(a_ ) elif key_name.startswith('''model/an''' ): __SCREAMING_SNAKE_CASE :Optional[int] = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): __SCREAMING_SNAKE_CASE :List[Any] = '''model.blocks.%d.self_attn.norm.bias''' % player __SCREAMING_SNAKE_CASE :Tuple = vnp.copy() # same because it is one dimensional __SCREAMING_SNAKE_CASE :Union[str, Any] = torch.tensor(a_ ) elif key_name.endswith('''/g''' ): __SCREAMING_SNAKE_CASE :Optional[int] = '''model.blocks.%d.self_attn.norm.weight''' % player __SCREAMING_SNAKE_CASE :List[str] = vnp.copy() # same because it is one dimensional __SCREAMING_SNAKE_CASE :Tuple = torch.tensor(a_ ) elif ( 
key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): __SCREAMING_SNAKE_CASE :str = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] __SCREAMING_SNAKE_CASE :Optional[int] = '''model.%s.weight''' % nlayer __SCREAMING_SNAKE_CASE :int = vnp.copy() # same in embedded __SCREAMING_SNAKE_CASE :str = torch.tensor(a_ ) if key_name.startswith('''model/wte''' ): __SCREAMING_SNAKE_CASE :Union[str, Any] = '''lm_head.weight''' __SCREAMING_SNAKE_CASE :Optional[Any] = vnp.copy() # same in embedded __SCREAMING_SNAKE_CASE :List[str] = torch.tensor(a_ ) elif key_name.startswith('''model/wob''' ): __SCREAMING_SNAKE_CASE :Any = '''final_logits_bias''' __SCREAMING_SNAKE_CASE :int = vnp.copy() # same in embedded __SCREAMING_SNAKE_CASE :List[Any] = state.reshape((1, -1) ) __SCREAMING_SNAKE_CASE :str = torch.tensor(a_ ) elif key_name == "model/dense/kernel": __SCREAMING_SNAKE_CASE :int = '''model.last_project.weight''' __SCREAMING_SNAKE_CASE :Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __SCREAMING_SNAKE_CASE :Dict = torch.tensor(a_ ) elif key_name == "model/dense_1/bias": __SCREAMING_SNAKE_CASE :List[str] = '''model.last_project.bias''' __SCREAMING_SNAKE_CASE :Any = vnp.copy() # same because it is one dimensional __SCREAMING_SNAKE_CASE :Optional[Any] = torch.tensor(a_ ) torch.save(a_ , args.output ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") lowerCamelCase_ = parser.parse_args() convert_tf_gptsan_to_pt(args)
498
1
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset a = random.Random() def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : str=1.0 , __magic_name__ : Dict=None , __magic_name__ : int=None ): """simple docstring""" if rng is None: _lowerCAmelCase :str = global_rng _lowerCAmelCase :Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: List[str]=7 , _UpperCAmelCase: Optional[Any]=400 , _UpperCAmelCase: Optional[int]=2000 , _UpperCAmelCase: Optional[int]=2048 , _UpperCAmelCase: List[Any]=128 , _UpperCAmelCase: Optional[Any]=1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: Optional[Any]=4_4100 , ): _lowerCAmelCase :List[str] = parent _lowerCAmelCase :Union[str, Any] = batch_size _lowerCAmelCase :Optional[int] = min_seq_length _lowerCAmelCase :Union[str, Any] = max_seq_length _lowerCAmelCase :Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _lowerCAmelCase :List[str] = spectrogram_length _lowerCAmelCase :Any = feature_size _lowerCAmelCase :int = num_audio_channels _lowerCAmelCase :Dict = hop_length _lowerCAmelCase :Optional[Any] = chunk_length _lowerCAmelCase :List[Any] = sampling_rate def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: List[Any]=False ): def _flatten(_UpperCAmelCase: str ): return list(itertools.chain(*_UpperCAmelCase ) ) if equal_length: _lowerCAmelCase :Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _lowerCAmelCase :Optional[int] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _lowerCAmelCase :Union[str, Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[Any] = TvltFeatureExtractor def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Any = TvltFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_UpperCAmelCase , 'spectrogram_length' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'feature_size' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'num_audio_channels' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'hop_length' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'chunk_length' 
) ) self.assertTrue(hasattr(_UpperCAmelCase , 'sampling_rate' ) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase :Dict = feat_extract_first.save_pretrained(_UpperCAmelCase )[0] check_json_file_has_correct_format(_UpperCAmelCase ) _lowerCAmelCase :Dict = self.feature_extraction_class.from_pretrained(_UpperCAmelCase ) _lowerCAmelCase :Optional[int] = feat_extract_first.to_dict() _lowerCAmelCase :Optional[Any] = feat_extract_second.to_dict() _lowerCAmelCase :List[str] = dict_first.pop('mel_filters' ) _lowerCAmelCase :int = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase :List[str] = os.path.join(_UpperCAmelCase , 'feat_extract.json' ) feat_extract_first.to_json_file(_UpperCAmelCase ) _lowerCAmelCase :Any = self.feature_extraction_class.from_json_file(_UpperCAmelCase ) _lowerCAmelCase :List[Any] = feat_extract_first.to_dict() _lowerCAmelCase :Tuple = feat_extract_second.to_dict() _lowerCAmelCase :str = dict_first.pop('mel_filters' ) _lowerCAmelCase :Any = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): # Initialize feature_extractor _lowerCAmelCase :Dict = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _lowerCAmelCase :str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _lowerCAmelCase :List[str] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] # Test not batched input _lowerCAmelCase :List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _lowerCAmelCase :Optional[int] = feature_extractor(_UpperCAmelCase , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _lowerCAmelCase :Optional[int] = feature_extractor( _UpperCAmelCase , return_tensors='np' , sampling_rate=4_4100 , mask_audio=_UpperCAmelCase ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
_lowerCAmelCase :Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] _lowerCAmelCase :Union[str, Any] = np.asarray(_UpperCAmelCase ) _lowerCAmelCase :Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='np' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: List[str] ): _lowerCAmelCase :Dict = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech _lowerCAmelCase :Tuple = ds.sort('id' ).select(range(_UpperCAmelCase ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Union[str, Any] = self._load_datasamples(1 ) _lowerCAmelCase :Optional[int] = TvltFeatureExtractor() _lowerCAmelCase :Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='pt' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) _lowerCAmelCase :Optional[int] = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
704
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """simple docstring"""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
382
0
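As an aside, the ramped-length batch construction exercised by the feature-extractor tester above is easy to reproduce standalone. The sketch below is illustrative only: the helper name and default sizes are chosen here, not taken from the test suite.

import numpy as np

# Illustrative sketch (hypothetical helper): a batch of random sequences whose
# lengths increase linearly from min_len toward max_len, as in the tester above.
def make_ramped_inputs(batch_size=7, min_len=400, max_len=2000, feature_size=128, seed=0):
    rng = np.random.default_rng(seed)
    step = (max_len - min_len) // (batch_size - 1)
    return [rng.random((length, feature_size)) for length in range(min_len, max_len, step)]

inputs = make_ramped_inputs()
assert len(inputs) == 7 and inputs[0].shape == (400, 128)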
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    '''simple docstring'''

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree():
    """simple docstring"""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None):
    """simple docstring"""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None):
    """simple docstring"""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None):
    """simple docstring"""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None):
    """simple docstring"""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None):
    """simple docstring"""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int):
    """simple docstring"""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int):
    """simple docstring"""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None):
    """simple docstring"""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main():  # Main function for testing.
    """simple docstring"""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
519
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ['attention', 'attn'],
    ['encoder_attention', 'encoder_attn'],
    ['q_lin', 'q_proj'],
    ['k_lin', 'k_proj'],
    ['v_lin', 'v_proj'],
    ['out_lin', 'out_proj'],
    ['norm_embeddings', 'layernorm_embedding'],
    ['position_embeddings', 'embed_positions'],
    ['embeddings', 'embed_tokens'],
    ['ffn.lin', 'fc'],
]


def rename_state_dict_key(k):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k


def rename_layernorm_keys(sd):
    """simple docstring"""
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ['START']


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """simple docstring"""
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
519
1
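A quick usage sketch for the binary-tree helpers above, checking the traversal orders on the five-node sample tree built by make_tree; the expected outputs follow directly from that tree's shape.

# Usage sketch for the traversal helpers above.
root = make_tree()
assert preorder(root) == [1, 2, 4, 5, 3]
assert inorder(root) == [4, 2, 5, 1, 3]
assert postorder(root) == [4, 5, 2, 3, 1]
assert level_order(root) == [1, 2, 3, 4, 5]
assert zigzag(root) == [[1], [3, 2], [4, 5]]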
"""simple docstring""" import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase : str = """sequence-classification""" def __init__( self , lowerCAmelCase ): """simple docstring""" if type(lowerCAmelCase ) == dict: snake_case = Namespace(**lowerCAmelCase ) snake_case = glue_output_modes[hparams.task] snake_case = glue_tasks_num_labels[hparams.task] super().__init__(lowerCAmelCase , lowerCAmelCase , self.mode ) def snake_case ( self , **lowerCAmelCase ): """simple docstring""" return self.model(**lowerCAmelCase ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None snake_case = self(**lowerCAmelCase ) snake_case = outputs[0] snake_case = self.trainer.lr_schedulers[0]['scheduler'] snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def snake_case ( self ): """simple docstring""" snake_case = self.hparams snake_case = processors[args.task]() snake_case = processor.get_labels() for mode in ["train", "dev"]: snake_case = self._feature_file(lowerCAmelCase ) if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache: logger.info('Loading features from cached file %s' , lowerCAmelCase ) else: logger.info('Creating features from dataset file at %s' , args.data_dir ) snake_case = ( processor.get_dev_examples(args.data_dir ) if mode == 'dev' else processor.get_train_examples(args.data_dir ) ) snake_case = convert_examples_to_features( lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('Saving features into cached file %s' , lowerCAmelCase ) torch.save(lowerCAmelCase , lowerCAmelCase ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ): """simple docstring""" snake_case = 'dev' if mode == 'test' else mode snake_case = self._feature_file(lowerCAmelCase ) logger.info('Loading features from cached file %s' , lowerCAmelCase ) snake_case = torch.load(lowerCAmelCase ) snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": snake_case = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": snake_case = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , batch_size=lowerCAmelCase , shuffle=lowerCAmelCase , ) def snake_case ( self , lowerCAmelCase , 
lowerCAmelCase ): """simple docstring""" snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None snake_case = self(**lowerCAmelCase ) snake_case ,snake_case = outputs[:2] snake_case = logits.detach().cpu().numpy() snake_case = inputs['labels'].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item() snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": snake_case = np.argmax(lowerCAmelCase , axis=1 ) elif self.hparams.glue_output_mode == "regression": snake_case = np.squeeze(lowerCAmelCase ) snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 ) snake_case = [[] for _ in range(out_label_ids.shape[0] )] snake_case = [[] for _ in range(out_label_ids.shape[0] )] snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowerCAmelCase , lowerCAmelCase )} snake_case = dict(results.items() ) snake_case = results return ret, preds_list, out_label_list def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case ,snake_case ,snake_case = self._eval_end(lowerCAmelCase ) snake_case = ret['log'] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case ,snake_case ,snake_case = self._eval_end(lowerCAmelCase ) snake_case = ret['log'] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def snake_case ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" BaseTransformer.add_model_specific_args(lowerCAmelCase , lowerCAmelCase ) parser.add_argument( '--max_seq_length' , default=1_28 , type=lowerCAmelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) , ) parser.add_argument( '--task' , default='' , type=lowerCAmelCase , required=lowerCAmelCase , help='The GLUE task to run' , ) parser.add_argument( '--gpus' , default=0 , type=lowerCAmelCase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) return parser def lowerCAmelCase__ ( ) -> Optional[Any]: """simple docstring""" snake_case = argparse.ArgumentParser() add_generic_args(_UpperCamelCase , os.getcwd() ) snake_case = GLUETransformer.add_model_specific_args(_UpperCamelCase , os.getcwd() ) snake_case = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: snake_case = os.path.join( './results' , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) snake_case = GLUETransformer(_UpperCamelCase ) snake_case = generic_train(_UpperCamelCase , _UpperCamelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=_UpperCamelCase ) ) snake_case = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_UpperCamelCase ) if __name__ == "__main__": main()
104
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=18 , lowerCAmelCase=30 , lowerCAmelCase=4_00 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ): """simple docstring""" snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size if size is not None else {'height': 18, 'width': 20} snake_case = do_thumbnail snake_case = do_align_axis snake_case = do_pad snake_case = do_normalize snake_case = image_mean snake_case = image_std def snake_case ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class lowerCAmelCase_ ( lowerCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase : str = DonutImageProcessor if is_vision_available() else None def snake_case ( self ): """simple docstring""" snake_case = DonutImageProcessingTester(self ) @property def snake_case ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self ): """simple docstring""" snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'size' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_thumbnail' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_align_long_axis' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_pad' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'image_std' ) ) def snake_case ( self ): """simple docstring""" snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def snake_case ( self ): """simple docstring""" pass @is_flaky() def snake_case ( self ): """simple docstring""" snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , Image.Image ) # Test not batched input 
snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched snake_case = image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def snake_case ( self ): """simple docstring""" snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , np.ndarray ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched snake_case = image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def snake_case ( self ): """simple docstring""" snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , torch.Tensor ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched snake_case = image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
104
1
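One detail of the GLUE evaluation code above worth isolating is how logits are collapsed to predictions inside _eval_end: argmax over classes for classification tasks, squeeze for regression tasks. A minimal numpy sketch of that branch (the function name here is illustrative):

import numpy as np

def collapse_predictions(logits: np.ndarray, output_mode: str) -> np.ndarray:
    # Mirrors the branch in _eval_end above: classification takes the argmax
    # over classes; regression just drops the trailing singleton dimension.
    if output_mode == "classification":
        return np.argmax(logits, axis=1)
    if output_mode == "regression":
        return np.squeeze(logits)
    raise ValueError(f"unknown output mode: {output_mode}")

assert collapse_predictions(np.array([[0.1, 0.9], [0.8, 0.2]]), "classification").tolist() == [1, 0]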
'''simple docstring'''


def is_automorphic_number(number: int) -> bool:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
22
import random


def _partition(data: list, pivot) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    '''simple docstring'''
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
240
0
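A usage sketch for quick_select above: the k-th order statistic it returns should agree with indexing into a fully sorted copy, which makes for an easy self-check.

import random

data = random.sample(range(100), 15)
k = 7
# quick_select finds the k-th smallest element without sorting everything.
assert quick_select(data, k) == sorted(data)[k]
assert quick_select(data, len(data)) is None  # out-of-range indices return None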
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter snake_case_ : str = True except ImportError: snake_case_ : Optional[Any] = False snake_case_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase( a__): return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path) class A__ ( UpperCamelCase__ ): @staticmethod def __UpperCamelCase ( _a : ArgumentParser ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE =parser.add_parser('''add-new-model''' ) add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' ) add_new_model_parser.add_argument('''--testing_file''' , type=_a , help='''Configuration file on which to run.''' ) add_new_model_parser.add_argument( '''--path''' , type=_a , help='''Path to cookiecutter. Should only be used for testing purposes.''' ) add_new_model_parser.set_defaults(func=_a ) def __init__( self : List[str] , _a : bool , _a : str , _a : Any=None , *_a : Union[str, Any] ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =testing _SCREAMING_SNAKE_CASE =testing_file _SCREAMING_SNAKE_CASE =path def __UpperCamelCase ( self : List[Any] ) -> int: """simple docstring""" warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''' ) if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory _SCREAMING_SNAKE_CASE =[directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]] if len(_a ) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. 
''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''' ) _SCREAMING_SNAKE_CASE =( Path(_a ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) _SCREAMING_SNAKE_CASE =path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(_a ) ) else: with open(self._testing_file , '''r''' ) as configuration_file: _SCREAMING_SNAKE_CASE =json.load(_a ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_a , extra_context=_a , ) _SCREAMING_SNAKE_CASE =[directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0] # Retrieve configuration with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file: _SCREAMING_SNAKE_CASE =json.load(_a ) _SCREAMING_SNAKE_CASE =configuration['''lowercase_modelname'''] _SCREAMING_SNAKE_CASE =configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(f"{directory}/configuration.json" ) _SCREAMING_SNAKE_CASE ='''PyTorch''' in generate_tensorflow_pytorch_and_flax _SCREAMING_SNAKE_CASE ='''TensorFlow''' in generate_tensorflow_pytorch_and_flax _SCREAMING_SNAKE_CASE ='''Flax''' in generate_tensorflow_pytorch_and_flax _SCREAMING_SNAKE_CASE =f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}" os.makedirs(_a , exist_ok=_a ) os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=_a ) # Tests require submodules as they have parent imports with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ): pass shutil.move( f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , ) shutil.move( f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , ) def remove_copy_lines(_a : int ): with open(_a , '''r''' ) as f: _SCREAMING_SNAKE_CASE =f.readlines() with open(_a , '''w''' ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(_a ) if output_pytorch: if not self._testing: remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" ) if output_flax: if not self._testing: remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , ) shutil.move( f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(_a : str , _a : str , _a : List[str] ): # Create temp file _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =mkstemp() _SCREAMING_SNAKE_CASE =False with fdopen(_a , '''w''' ) as new_file: with open(_a ) as old_file: for line in old_file: new_file.write(_a ) if line_to_copy_below in line: _SCREAMING_SNAKE_CASE =True for line_to_copy in lines_to_copy: new_file.write(_a ) if not line_found: raise ValueError(f"Line {line_to_copy_below} was not found in file." ) # Copy the file permissions from the old file to the new file copymode(_a , _a ) # Remove original file remove(_a ) # Move new file move(_a , _a ) def skip_units(_a : str ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(_a : Any ): with open(_a ) as datafile: _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False for line in datafile: if "# To replace in: " in line and "##" not in line: _SCREAMING_SNAKE_CASE =line.split('''"''' )[1] _SCREAMING_SNAKE_CASE =skip_units(_a ) elif "# Below: " in line and "##" not in line: _SCREAMING_SNAKE_CASE =line.split('''"''' )[1] _SCREAMING_SNAKE_CASE =skip_units(_a ) elif "# End." 
in line and "##" not in line: if not skip_file and not skip_snippet: replace(_a , _a , _a ) _SCREAMING_SNAKE_CASE =[] elif "# Replace with" in line and "##" not in line: _SCREAMING_SNAKE_CASE =[] elif "##" not in line: lines_to_copy.append(_a ) remove(_a ) replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" ) os.rmdir(_a )
191
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = '''T5Config'''


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
191
1
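The add-new-model command above edits files in place via a temp-file swap (mkstemp plus fdopen, then copy permissions and move). A self-contained sketch of that pattern, with an illustrative function name:

import os
from shutil import copymode, move
from tempfile import mkstemp

def rewrite_file(path: str, transform) -> None:
    # Stream the old file into a temp file, applying a per-line transform,
    # then preserve the original file's permissions and swap the temp file in.
    fd, tmp_path = mkstemp()
    with os.fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(transform(line))
    copymode(path, tmp_path)
    os.remove(path)
    move(tmp_path, path)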
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput _a : int = logging.get_logger(__name__) # pylint: disable=invalid-name def _a (lowercase__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> Optional[int]: """simple docstring""" warnings.warn( 'The preprocess method is deprecated and will be removed in a future version. Please' ' use VaeImageProcessor.preprocess instead' , lowercase__ , ) if isinstance(lowercase__ , torch.Tensor ): return image elif isinstance(lowercase__ , PIL.Image.Image ): __snake_case = [image] if isinstance(image[0] , PIL.Image.Image ): __snake_case , __snake_case = image[0].size __snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 __snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] __snake_case = np.concatenate(lowercase__ , axis=0 ) __snake_case = np.array(lowercase__ ).astype(np.floataa ) / 2_55.0 __snake_case = image.transpose(0 , 3 , 1 , 2 ) __snake_case = 2.0 * image - 1.0 __snake_case = torch.from_numpy(lowercase__ ) elif isinstance(image[0] , torch.Tensor ): __snake_case = torch.cat(lowercase__ , dim=0 ) return image def _a (lowercase__ : Union[List, PIL.Image.Image, torch.Tensor] ) -> Dict: """simple docstring""" if isinstance(lowercase__ , torch.Tensor ): return mask elif isinstance(lowercase__ , PIL.Image.Image ): __snake_case = [mask] if isinstance(mask[0] , PIL.Image.Image ): __snake_case , __snake_case = mask[0].size __snake_case , __snake_case = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32 __snake_case = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask] __snake_case = np.concatenate(lowercase__ , axis=0 ) __snake_case = mask.astype(np.floataa ) / 2_55.0 __snake_case = 0 __snake_case = 1 __snake_case = torch.from_numpy(lowercase__ ) elif isinstance(mask[0] , torch.Tensor ): __snake_case = torch.cat(lowercase__ , dim=0 ) return mask class _lowercase ( __lowercase ): _SCREAMING_SNAKE_CASE : UNetaDModel _SCREAMING_SNAKE_CASE : RePaintScheduler def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE_ : int = 250 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : int = 10 , SCREAMING_SNAKE_CASE_ : int = 10 , SCREAMING_SNAKE_CASE_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]: __snake_case = image __snake_case = _preprocess_image(SCREAMING_SNAKE_CASE_ ) __snake_case = original_image.to(device=self.device , dtype=self.unet.dtype ) __snake_case = _preprocess_mask(SCREAMING_SNAKE_CASE_ ) __snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype ) __snake_case = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size: raise ValueError( f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch' f' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) __snake_case = original_image.shape __snake_case = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device ) __snake_case = eta __snake_case = self.scheduler.timesteps[0] + 1 __snake_case = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual __snake_case = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # compute previous image: x_t -> x_t-1 __snake_case = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t __snake_case = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __snake_case = t __snake_case = (image / 2 + 0.5).clamp(0 , 1 ) __snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __snake_case = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
56
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowercase_ : def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]: a__ =parent a__ =out_indices if out_indices is not None else [4] a__ =stage_names a__ =out_features a__ =backbone a__ =batch_size a__ =image_size a__ =num_channels a__ =use_pretrained_backbone a__ =is_training def __UpperCamelCase ( self) -> Optional[Any]: a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a__ =self.get_config() return config, pixel_values def __UpperCamelCase ( self) -> Tuple: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str: a__ =TimmBackbone(config=lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): a__ =model(lowercase_) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __UpperCamelCase ( self) -> str: a__ =self.prepare_config_and_inputs() a__ , a__ =config_and_inputs a__ ={'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case =(TimmBackbone,) if is_torch_available() else () snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {} snake_case =False snake_case =False snake_case =False snake_case =False def __UpperCamelCase ( self) -> Optional[Any]: a__ =TimmBackboneModelTester(self) a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_) def __UpperCamelCase ( self) -> Dict: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCamelCase ( self) -> str: a__ ='resnet18' a__ ='microsoft/resnet-18' a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_) a__ =AutoBackbone.from_pretrained(lowercase_) self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names)) self.assertEqual(timm_model.channels , transformers_model.channels) # Out indices are set to the last layer by default. 
For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,)) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1]) a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3]) a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices , transformers_model.out_indices) self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features)) self.assertEqual(timm_model.channels , transformers_model.channels) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking') def __UpperCamelCase ( self) -> int: pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute') def __UpperCamelCase ( self) -> List[str]: pass @unittest.skip('TimmBackbone initialization is managed on the timm side') def __UpperCamelCase ( self) -> Any: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds') def __UpperCamelCase ( self) -> Any: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds') def __UpperCamelCase ( self) -> List[str]: pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint') def __UpperCamelCase ( self) -> Optional[int]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone') def __UpperCamelCase ( self) -> Union[str, Any]: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.') def __UpperCamelCase ( self) -> Dict: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.') def __UpperCamelCase ( self) -> List[Any]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone') def __UpperCamelCase ( self) -> List[str]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone') def __UpperCamelCase ( self) -> Union[str, Any]: pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.') def __UpperCamelCase ( self) -> int: pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.') def __UpperCamelCase ( self) -> str: pass @unittest.skip('Safetensors is not supported by timm.') def __UpperCamelCase ( self) -> Optional[int]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def __UpperCamelCase ( self) -> Optional[Any]: pass def __UpperCamelCase ( self) -> Any: a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ =model_class(lowercase_) a__ =inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ =[*signature.parameters.keys()] a__ =['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase_) def __UpperCamelCase ( self) -> Any: a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common() a__ =True a__ =self.has_attentions # no need to test all models as different heads yield the same functionality a__ =self.all_model_classes[0] a__ =model_class(lowercase_) model.to(lowercase_) a__ =self._prepare_for_class(lowercase_ , lowercase_) a__ =model(**lowercase_) a__ =outputs[0][-1] # Encoder-/Decoder-only models a__ =outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: a__ =outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=lowercase_) 
self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def __UpperCamelCase ( self) -> List[str]: a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ =model_class(lowercase_) model.to(lowercase_) model.eval() a__ =model(**lowercase_) self.assertEqual(len(result.feature_maps) , len(config.out_indices)) self.assertEqual(len(model.channels) , len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None a__ =copy.deepcopy(lowercase_) a__ =None a__ =model_class(lowercase_) model.to(lowercase_) model.eval() a__ =model(**lowercase_) self.assertEqual(len(result.feature_maps) , 1) self.assertEqual(len(model.channels) , 1) # Check backbone can be initialized with fresh weights a__ =copy.deepcopy(lowercase_) a__ =False a__ =model_class(lowercase_) model.to(lowercase_) model.eval() a__ =model(**lowercase_)
20
0
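The RePaint preprocessing above scales the mask to [0, 1] and then binarizes it. A minimal numpy sketch of just that step, thresholding at 0.5 to match the intent of the mask preprocessing (the helper name is illustrative):

import numpy as np

def binarize_mask(mask_uinta: np.ndarray) -> np.ndarray:
    # Scale to [0, 1], then threshold so every pixel is exactly 0 or 1.
    mask = mask_uinta.astype(np.float32) / 255.0
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    return mask

assert binarize_mask(np.array([[0, 128, 255]], dtype=np.uint8)).tolist() == [[0.0, 1.0, 1.0]]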
"""simple docstring""" def SCREAMING_SNAKE_CASE ( lowerCamelCase_): a__ = [0] * len(lowerCamelCase_) for i in range(1 , len(lowerCamelCase_)): # use last results for better performance - dynamic programming a__ = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: a__ = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 a__ = j return prefix_result def SCREAMING_SNAKE_CASE ( lowerCamelCase_): return max(prefix_function(lowerCamelCase_)) if __name__ == "__main__": import doctest doctest.testmod()
707
"""simple docstring""" from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: List[str] , __A: str , __A: Any=2 , __A: str=3 , __A: Tuple=4 , __A: Dict=2 , __A: List[Any]=7 , __A: Any=True , __A: Any=True , __A: List[str]=True , __A: Optional[int]=True , __A: Optional[int]=99 , __A: Tuple=36 , __A: List[str]=2 , __A: Dict=4 , __A: List[str]=37 , __A: Optional[int]="gelu" , __A: Optional[int]=0.1 , __A: Tuple=0.1 , __A: List[Any]=512 , __A: List[str]=16 , __A: Any=2 , __A: Union[str, Any]=0.0_2 , __A: Optional[int]=6 , __A: Union[str, Any]=6 , __A: Union[str, Any]=3 , __A: Tuple=4 , __A: Optional[int]=None , __A: Optional[Any]=1000 , ): '''simple docstring''' a__ = parent a__ = batch_size a__ = num_channels a__ = image_size a__ = patch_size a__ = is_training a__ = use_input_mask a__ = use_token_type_ids a__ = use_labels a__ = vocab_size a__ = hidden_size a__ = num_hidden_layers a__ = num_attention_heads a__ = intermediate_size a__ = hidden_act a__ = hidden_dropout_prob a__ = attention_probs_dropout_prob a__ = max_position_embeddings a__ = type_vocab_size a__ = type_sequence_label_size a__ = initializer_range a__ = coordinate_size a__ = shape_size a__ = num_labels a__ = num_choices a__ = scope a__ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) a__ = text_seq_length a__ = (image_size // patch_size) ** 2 + 1 a__ = self.text_seq_length + self.image_seq_length def lowercase ( self: Optional[int] ): '''simple docstring''' a__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) a__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) a__ = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a__ = bbox[i, j, 3] a__ = bbox[i, j, 1] a__ = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: a__ = bbox[i, j, 2] a__ = bbox[i, j, 0] a__ = tmp_coordinate a__ = tf.constant(__A ) a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ = None if self.use_input_mask: a__ = random_attention_mask([self.batch_size, self.text_seq_length] ) a__ = None if self.use_token_type_ids: a__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) a__ = None a__ = None if self.use_labels: a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ = ids_tensor([self.batch_size, 
self.text_seq_length] , self.num_labels ) a__ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase ( self: List[str] , __A: List[str] , __A: List[str] , __A: List[str] , __A: int , __A: Any , __A: Any ): '''simple docstring''' a__ = TFLayoutLMvaModel(config=__A ) # text + image a__ = model(__A , pixel_values=__A , training=__A ) a__ = model( __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , training=__A , ) a__ = model(__A , bbox=__A , pixel_values=__A , training=__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only a__ = model(__A , training=__A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only a__ = model({'''pixel_values''': pixel_values} , training=__A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase ( self: Optional[int] , __A: Any , __A: str , __A: List[str] , __A: List[str] , __A: List[str] , __A: Optional[Any] , __A: Any ): '''simple docstring''' a__ = self.num_labels a__ = TFLayoutLMvaForSequenceClassification(config=__A ) a__ = model( __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , training=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self: str , __A: List[str] , __A: int , __A: str , __A: Any , __A: str , __A: str , __A: str ): '''simple docstring''' a__ = self.num_labels a__ = TFLayoutLMvaForTokenClassification(config=__A ) a__ = model( __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , training=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase ( self: Union[str, Any] , __A: Any , __A: List[Any] , __A: Any , __A: List[str] , __A: Any , __A: Optional[int] , __A: Tuple ): '''simple docstring''' a__ = 2 a__ = TFLayoutLMvaForQuestionAnswering(config=__A ) a__ = model( __A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , training=__A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase ( self: int ): '''simple docstring''' a__ = self.prepare_config_and_inputs() ((a__) ,(a__) ,(a__) ,(a__) ,(a__) ,(a__) ,(a__) ,(a__)) = config_and_inputs a__ = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ): """simple 
docstring""" _SCREAMING_SNAKE_CASE =( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _SCREAMING_SNAKE_CASE =( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False def lowercase ( self: List[str] , __A: Dict , __A: Optional[Any] , __A: str , __A: Union[str, Any] , __A: Optional[Any] ): '''simple docstring''' return True def lowercase ( self: Dict , __A: Optional[Any] , __A: Any , __A: List[Any]=False ): '''simple docstring''' a__ = copy.deepcopy(__A ) if model_class in get_values(__A ): a__ = { k: tf.tile(tf.expand_dims(__A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__A , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__A ): a__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__A ): a__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) a__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__A ): a__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__A ): a__ = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase ( self: List[str] ): '''simple docstring''' a__ = TFLayoutLMvaModelTester(self ) a__ = ConfigTester(self , config_class=__A , hidden_size=37 ) def lowercase ( self: Dict ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase ( self: Any ): '''simple docstring''' a__ ,a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ = model_class(__A ) if getattr(__A , '''hf_compute_loss''' , __A ): # The number of elements in the loss should be the same as the number of elements in the label a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A ) a__ = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__A )[0] ] a__ = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A ) a__ = prepared_for_class.pop('''input_ids''' ) a__ = model(__A , **__A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A ) a__ = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: a__ = prepared_for_class['''labels'''].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: a__ = -100 a__ = tf.convert_to_tensor(__A ) a__ = model(__A , **__A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A ) a__ = model(__A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A ) # Get keys 
that were added with the _prepare_for_class function a__ = prepared_for_class.keys() - inputs_dict.keys() a__ = inspect.signature(model.call ).parameters a__ = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple a__ = {0: '''input_ids'''} for label_key in label_keys: a__ = signature_names.index(__A ) a__ = label_key a__ = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple a__ = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: a__ = prepared_for_class[value] a__ = tuple(__A ) # Send to model a__ = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase ( self: Optional[int] ): '''simple docstring''' ( ( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__A , __A , __A , __A , __A , __A ) def lowercase ( self: Dict ): '''simple docstring''' ( ( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a__ = type self.model_tester.create_and_check_model(__A , __A , __A , __A , __A , __A ) def lowercase ( self: int ): '''simple docstring''' ( ( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __A , __A , __A , __A , __A , __A , __A ) def lowercase ( self: List[str] ): '''simple docstring''' ( ( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __A , __A , __A , __A , __A , __A , __A ) def lowercase ( self: Tuple ): '''simple docstring''' ( ( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) ,( a__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __A , __A , __A , __A , __A , __A , __A ) @slow def lowercase ( self: Union[str, Any] ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ = TFLayoutLMvaModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def SCREAMING_SNAKE_CASE ( ): a__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase ( self: Any ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None @slow def lowercase ( self: Union[str, Any] ): '''simple docstring''' a__ = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) a__ = self.default_image_processor a__ = prepare_img() a__ = image_processor(images=__A , return_tensors='''tf''' ).pixel_values a__ = tf.constant([[1, 2]] ) a__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass a__ = model(input_ids=__A , bbox=__A , pixel_values=__A , training=__A ) # verify the logits a__ = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __A ) a__ = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) 
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1e-4 ) )
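# A hedged sketch of the multimodal inference pattern that the integration
# test above checks. Assumption: the sample's obfuscated TFLayoutLMvaModel
# corresponds to transformers' TFLayoutLMv3Model, and the random
# pixel_values tensor is a stand-in for a processed page image.
import tensorflow as tf
from transformers import TFLayoutLMv3Model

model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
input_ids = tf.constant([[1, 2]])                    # two text tokens
bbox = tf.constant([[[1, 2, 3, 4], [5, 6, 7, 8]]])   # one word box per token
pixel_values = tf.random.uniform((1, 3, 224, 224))   # stand-in page image
outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
# 199 = 2 text tokens + 197 visual tokens (196 patches of 16x16 + 1 special)
print(outputs.last_hidden_state.shape)  # (1, 199, 768)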
200
0
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class A ( unittest.TestCase ): '''simple docstring''' A__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" lowercase__ = hf_hub_download( repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 ) lowercase__ = [ example_video_filepath, """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""", ] return video_classifier, examples def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" for example in examples: lowercase__ = video_classifier(_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )}, {"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )}, ] , ) @require_torch def lowerCamelCase__ (self : str ) -> List[Any]: """simple docstring""" lowercase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification""" lowercase__ = VideoMAEFeatureExtractor( size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} ) lowercase__ = pipeline( """video-classification""" , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 ) lowercase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) lowercase__ = video_classifier(_UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , ) lowercase__ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=4 ) , [ [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}], [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}], ] , ) @require_tf def lowerCamelCase__ (self : Any ) -> Union[str, Any]: """simple docstring""" pass
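# The test above exercises transformers' video-classification pipeline; a
# minimal usage sketch with the same tiny model and demo clip (requires the
# decord video backend to be installed).
from huggingface_hub import hf_hub_download
from transformers import pipeline

video_path = hf_hub_download(
    repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
)
classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
)
# Returns a list of {"score": float, "label": str} dicts, best score first.
print(classifier(video_path, top_k=2))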
15
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class A ( UpperCAmelCase__ ): '''simple docstring''' def __init__(self : Any , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ) -> List[str]: """simple docstring""" super().__init__( _UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__ = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths} lowercase__ = Text( cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , **_UpperCAmelCase , ) def lowerCamelCase__ (self : Tuple ) -> Any: """simple docstring""" if self.streaming: lowercase__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None self.builder.download_and_prepare( download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , ) lowercase__ = self.builder.as_dataset( split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset
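# The reader class above is what backs load_dataset("text", ...). A hedged
# usage sketch; corpus.txt is a hypothetical file with one example per line.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")
print(ds[0]["text"])  # each line of the file becomes one "text" example

# streaming=True takes the as_streaming_dataset branch above and yields an
# IterableDataset instead of preparing the dataset on disk first.
stream = load_dataset(
    "text", data_files={"train": "corpus.txt"}, split="train", streaming=True
)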
15
1
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
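# split_dataset_by_node, the helper the script above stress-tests, in its
# simplest single-process form: each rank keeps a disjoint slice whose sizes
# differ by at most one. Values here are illustrative.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

full = Dataset.from_dict({"i": list(range(12))})
for rank in range(4):
    shard = split_dataset_by_node(full, rank=rank, world_size=4)
    print(rank, shard["i"])  # e.g. rank 0 -> [0, 1, 2]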
707
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCamelCase__ : Tuple = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
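# The __init__ above defers heavy imports through transformers' internal
# _LazyModule. A hedged, stand-alone sketch of the same idea using a PEP 562
# module-level __getattr__; it behaves lazily when placed in a package's
# __init__.py, and the attribute-to-module mapping below is illustrative.
import importlib

_attr_to_module = {"TrOCRConfig": "transformers.models.trocr.configuration_trocr"}

def __getattr__(name):
    # Only runs when `name` is not already defined: import on first access.
    try:
        module = importlib.import_module(_attr_to_module[name])
    except KeyError:
        raise AttributeError(name) from None
    return getattr(module, name)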
620
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( UpperCAmelCase_ ): lowerCAmelCase = ["""pixel_values"""] def __init__( self : List[str] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : Tuple , ): super().__init__(**_lowercase ) A = size if size is not None else {'shortest_edge': 224} A = get_size_dict(_lowercase , default_to_square=_lowercase ) A = crop_size if crop_size is not None else {'height': 224, 'width': 224} A = get_size_dict(_lowercase , default_to_square=_lowercase , param_name='crop_size' ) A = do_resize A = size A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A = image_std if image_std is not None else OPENAI_CLIP_STD A = do_convert_rgb def __a ( self : str , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ): A = get_size_dict(_lowercase , default_to_square=_lowercase ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase ) return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase ) def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ): A = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase ) def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ): return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def __a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ): return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def __a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowercase : int , ): A = do_resize if do_resize is not None else self.do_resize A = size if size is not None else self.size A = get_size_dict(_lowercase , param_name='size' , default_to_square=_lowercase ) A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(_lowercase , param_name='crop_size' , default_to_square=_lowercase ) A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = image_std if image_std is not None else self.image_std A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: A = [convert_to_rgb(_lowercase ) for image in images] # All transformations expect numpy arrays. 
A = [to_numpy_array(_lowercase ) for image in images] if do_resize: A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images] if do_center_crop: A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images] if do_rescale: A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images] if do_normalize: A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images] A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] A = {'pixel_values': images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
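# The preprocess method above applies resize -> center crop -> rescale ->
# normalize -> channels-first. A NumPy-only sketch of the last three steps,
# assuming the image is already resized and cropped to 224x224; the mean/std
# values are the OpenAI CLIP constants referenced by the class.
import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

def preprocess(image: np.ndarray) -> np.ndarray:
    """image: 224x224x3 uint8 array -> normalized 3x224x224 float32 array."""
    x = image.astype(np.float32) * (1 / 255)       # rescale to [0, 1]
    x = (x - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD   # per-channel normalize
    return np.transpose(x, (2, 0, 1))              # HWC -> CHW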
690
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase : Optional[int] = logging.get_logger(__name__) UpperCamelCase : int = {"vocab_file": "sentencepiece.model"} UpperCamelCase : Union[str, Any] = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, } UpperCamelCase : Union[str, Any] = { "google/rembert": 256, } class lowerCamelCase__ ( UpperCAmelCase_ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : int="[CLS]" , _lowercase : str="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : List[Any]="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : List[str]="[CLS]" , _lowercase : Any="[MASK]" , **_lowercase : Optional[Any] , ): super().__init__( do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , ) A = do_lower_case A = remove_space A = keep_accents A = vocab_file A = spm.SentencePieceProcessor() self.sp_model.Load(_lowercase ) @property def __a ( self : Tuple ): return len(self.sp_model ) def __a ( self : List[str] ): A = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ): A = self.__dict__.copy() A = None return state def __setstate__( self : List[str] , _lowercase : int ): A = d A = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Dict=False ): A = self.sp_model.EncodeAsPieces(_lowercase ) return pieces def __a ( self : Dict , _lowercase : Tuple ): return self.sp_model.PieceToId(_lowercase ) def __a ( self : str , _lowercase : Optional[int] ): return self.sp_model.IdToPiece(_lowercase ) def __a ( self : Optional[int] , _lowercase : Optional[int] ): A = self.sp_model.decode_pieces(_lowercase ) return out_string def __a ( self : Optional[int] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1] def __a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[str] = None ): if not os.path.isdir(_lowercase ): logger.error('Vocabulary path ({}) should be a directory'.format(_lowercase ) ) return A = os.path.join( _lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ): copyfile(self.vocab_file , _lowercase ) return (out_vocab_file,)
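# build_inputs_with_special_tokens above lays sequences out as [CLS] A [SEP]
# and [CLS] A [SEP] B [SEP]. A toy restatement with hypothetical token ids
# (cls=2, sep=3) to make the layout concrete.
def build_inputs(ids_a, ids_b=None, cls_id=2, sep_id=3):
    out = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        out = out + ids_b + [sep_id]
    return out

assert build_inputs([5, 6]) == [2, 5, 6, 3]
assert build_inputs([5, 6], [7]) == [2, 5, 6, 3, 7, 3]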
690
1
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property.

    The window d2d3d4 must be divisible by 2, d3d4d5 by 3, d4d5d6 by 5, and
    the remaining three-digit windows by 7, 11, 13 and 17 respectively.
    """
    if num[3] % 2 != 0:  # d2d3d4 divisible by 2 iff its last digit is even
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:  # digit-sum test for 3
        return False
    if num[5] % 5 != 0:  # d4d5d6 divisible by 5 iff its last digit is 0 or 5
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-(n-1) pandigital numbers with the property above."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
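# A quick check of the substring-divisibility property on 1406357289, the
# pandigital example given in the Project Euler 43 statement: each window
# d(i+1)d(i+2)d(i+3) must be divisible by the i-th prime.
digits = [int(c) for c in "1406357289"]
primes = [2, 3, 5, 7, 11, 13, 17]
assert all(
    (digits[i + 1] * 100 + digits[i + 2] * 10 + digits[i + 3]) % p == 0
    for i, p in enumerate(primes)
)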
701
'''simple docstring''' class _A : '''simple docstring''' def __init__( self : List[Any] )-> List[str]: snake_case__ : List[str] = """""" snake_case__ : Dict = """""" snake_case__ : Union[str, Any] = [] def __lowerCAmelCase ( self : Any , lowerCamelCase : int , lowerCamelCase : int )-> int: if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: snake_case__ : Any = self.__min_dist_top_down_dp(m - 1 , n - 1 ) else: snake_case__ : List[Any] = self.__min_dist_top_down_dp(lowerCamelCase , n - 1 ) snake_case__ : Any = self.__min_dist_top_down_dp(m - 1 , lowerCamelCase ) snake_case__ : Optional[int] = self.__min_dist_top_down_dp(m - 1 , n - 1 ) snake_case__ : Dict = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self.dp[m][n] def __lowerCAmelCase ( self : List[str] , lowerCamelCase : str , lowerCamelCase : str )-> int: snake_case__ : Optional[int] = worda snake_case__ : List[str] = worda snake_case__ : List[str] = [[-1 for _ in range(len(lowerCamelCase ) )] for _ in range(len(lowerCamelCase ) )] return self.__min_dist_top_down_dp(len(lowerCamelCase ) - 1 , len(lowerCamelCase ) - 1 ) def __lowerCAmelCase ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : str )-> int: snake_case__ : List[str] = worda snake_case__ : int = worda snake_case__ : Any = len(lowerCamelCase ) snake_case__ : List[str] = len(lowerCamelCase ) snake_case__ : List[str] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty snake_case__ : Union[str, Any] = j elif j == 0: # second string is empty snake_case__ : List[str] = i elif worda[i - 1] == worda[j - 1]: # last characters are equal snake_case__ : Tuple = self.dp[i - 1][j - 1] else: snake_case__ : int = self.dp[i][j - 1] snake_case__ : List[Any] = self.dp[i - 1][j] snake_case__ : List[str] = self.dp[i - 1][j - 1] snake_case__ : Tuple = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self.dp[m][n] if __name__ == "__main__": lowerCAmelCase__ = EditDistance() print('****************** Testing Edit Distance DP Algorithm ******************') print() lowerCAmelCase__ = input('Enter the first string: ').strip() lowerCAmelCase__ = input('Enter the second string: ').strip() print() print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""") print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""") print() print('*************** End of Testing Edit Distance DP Algorithm ***************')
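# The class above keeps a full m x n table; a hedged sketch of the standard
# two-row space optimisation of the same bottom-up recurrence:
#   dp[i][j] = dp[i-1][j-1] if the chars match, else 1 + min(insert, delete, replace)
def edit_distance(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))  # distances from "" to each prefix of b
    for i, ca in enumerate(a, start=1):
        curr = [i]  # distance from a[:i] to ""
        for j, cb in enumerate(b, start=1):
            cost = 0 if ca == cb else 1
            curr.append(min(prev[j] + 1,          # delete from a
                            curr[j - 1] + 1,      # insert into a
                            prev[j - 1] + cost))  # replace (or keep)
        prev = curr
    return prev[-1]

assert edit_distance("kitten", "sitting") == 3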
172
0
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , __lowercase : int , __lowercase : str=13 , __lowercase : Tuple=7 , __lowercase : Any=True , __lowercase : Optional[int]=True , __lowercase : Any=True , __lowercase : Tuple=True , __lowercase : List[Any]=99 , __lowercase : List[Any]=32 , __lowercase : List[Any]=5 , __lowercase : str=4 , __lowercase : Optional[int]=37 , __lowercase : Any="gelu" , __lowercase : int=0.1 , __lowercase : Dict=0.1 , __lowercase : List[Any]=5_12 , __lowercase : str=16 , __lowercase : Any=2 , __lowercase : List[str]=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ): """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def snake_case__ ( self : int ): """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self : Optional[Any] ): """simple docstring""" return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def snake_case__ ( self : str , __lowercase : Union[str, Any] , 
__lowercase : List[str] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Tuple ): """simple docstring""" snake_case_ = NystromformerModel(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case_ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) snake_case_ = model(__lowercase , token_type_ids=__lowercase ) snake_case_ = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Any , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Any , __lowercase : int , __lowercase : int , __lowercase : Optional[Any] , __lowercase : int ): """simple docstring""" snake_case_ = NystromformerForMaskedLM(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case_ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Union[str, Any] , __lowercase : Tuple ): """simple docstring""" snake_case_ = NystromformerForQuestionAnswering(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case_ = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__ ( self : int , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : str , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : List[Any] ): """simple docstring""" snake_case_ = self.num_labels snake_case_ = NystromformerForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() snake_case_ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self : Dict , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : Dict ): """simple docstring""" snake_case_ = self.num_labels snake_case_ = NystromformerForTokenClassification(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case_ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : Optional[Any] , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : str ): """simple docstring""" snake_case_ = self.num_choices snake_case_ = NystromformerForMultipleChoice(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , 
self.num_choices , -1 ).contiguous() snake_case_ = model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case__ ( self : Tuple ): """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase_ = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def snake_case__ ( self : Dict ): """simple docstring""" snake_case_ = NystromformerModelTester(self ) snake_case_ = ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def snake_case__ ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def snake_case__ ( self : Dict ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def snake_case__ ( self : Optional[int] ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ = type self.model_tester.create_and_check_model(*__lowercase ) def snake_case__ ( self : Tuple ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowercase ) def snake_case__ ( self : Dict ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowercase ) def snake_case__ ( self : Tuple ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowercase ) def snake_case__ ( self : Optional[Any] ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowercase ) def snake_case__ ( self : List[Any] ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowercase ) @slow def snake_case__ ( self : Optional[int] ): """simple docstring""" for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = NystromformerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def snake_case__ ( self : Union[str, Any] ): """simple docstring""" snake_case_ = 
NystromformerModel.from_pretrained("uw-madison/nystromformer-512" ) snake_case_ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): snake_case_ = model(__lowercase )[0] snake_case_ = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , __lowercase ) snake_case_ = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4 ) ) @slow def snake_case__ ( self : int ): """simple docstring""" snake_case_ = "the [MASK] of Belgium is Brussels" snake_case_ = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" ) snake_case_ = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" ) snake_case_ = tokenizer(__lowercase , return_tensors="pt" ) with torch.no_grad(): snake_case_ = model(encoding.input_ids ).logits snake_case_ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(__lowercase ) , "capital" )
376
import numpy as np import qiskit def lowerCamelCase__ ( _A = 8 , _A = None ): '''simple docstring''' snake_case_ = np.random.default_rng(seed=_A ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. snake_case_ = 6 * key_len # Measurement basis for Alice's qubits. snake_case_ = rng.integers(2 , size=_A ) # The set of states Alice will prepare. snake_case_ = rng.integers(2 , size=_A ) # Measurement basis for Bob's qubits. snake_case_ = rng.integers(2 , size=_A ) # Quantum Circuit to simulate BB84 snake_case_ = qiskit.QuantumCircuit(_A , name="BB84" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(_A ): if alice_state[index] == 1: bbaa_circ.x(_A ) if alice_basis[index] == 1: bbaa_circ.h(_A ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(_A ): if bob_basis[index] == 1: bbaa_circ.h(_A ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. snake_case_ = qiskit.Aer.get_backend("aer_simulator" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. snake_case_ = qiskit.execute(_A , _A , shots=1 , seed_simulator=_A ) # Returns the result of measurement. snake_case_ = job.result().get_counts(_A ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. snake_case_ = "".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( _A , _A , _A ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. snake_case_ = gen_key[:key_len] if len(_A ) >= key_len else gen_key.ljust(_A , "0" ) return key if __name__ == "__main__": print(f'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
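# The classical sifting step at the end of the BB84 routine above, isolated:
# keep only the measured bits at positions where Alice's and Bob's randomly
# chosen bases agree. measured_bits stands in for the simulator's output.
import numpy as np

rng = np.random.default_rng(seed=0)
alice_basis = rng.integers(2, size=16)
bob_basis = rng.integers(2, size=16)
measured_bits = rng.integers(2, size=16)  # placeholder for circuit results

key = "".join(
    str(bit)
    for a, b, bit in zip(alice_basis, bob_basis, measured_bits)
    if a == b
)
print(key)  # on average about half the positions survive sifting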
376
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=7 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : List[Any]=1_8 , lowerCAmelCase_ : str=3_0 , lowerCAmelCase_ : Tuple=4_0_0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Union[str, Any]=True , ) -> Tuple: __lowerCAmelCase = size if size is not None else {'shortest_edge': 2_0} __lowerCAmelCase = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = image_size __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = do_center_crop __lowerCAmelCase = crop_size __lowerCAmelCase = do_flip_channel_order def lowercase ( self : Any ) -> List[str]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ): """simple docstring""" a_ = MobileViTImageProcessor if is_vision_available() else None def lowercase ( self : Any ) -> str: __lowerCAmelCase = MobileViTImageProcessingTester(self ) @property def lowercase ( self : Dict ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def lowercase ( self : str ) -> Optional[int]: __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , 'size' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , 'do_center_crop' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , 'center_crop' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , 'do_flip_channel_order' ) ) def lowercase ( self : List[str] ) -> Any: __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def lowercase ( self : Union[str, Any] ) -> Optional[int]: pass def lowercase ( self : List[str] ) -> List[Any]: # Initialize image_processing __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape 
, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCAmelCase = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowercase ( self : Optional[int] ) -> List[Any]: # Initialize image_processing __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCAmelCase = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowercase ( self : Dict ) -> Union[str, Any]: # Initialize image_processing __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __lowerCAmelCase = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
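# The step that distinguishes this processor from most others above is
# do_flip_channel_order, which swaps RGB to BGR. For a channels-last NumPy
# image that is just a reversal of the last axis:
import numpy as np

img = np.arange(18 * 18 * 3, dtype=np.uint8).reshape(18, 18, 3)  # toy RGB image
bgr = img[..., ::-1]  # flip channel order
assert (bgr[..., 0] == img[..., 2]).all()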
704
import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def a_ ( ): __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('--model_ckpt', type=lowerCAmelCase_, default='microsoft/unixcoder-base-nine' ) parser.add_argument('--num_epochs', type=lowerCAmelCase_, default=5 ) parser.add_argument('--batch_size', type=lowerCAmelCase_, default=6 ) parser.add_argument('--gradient_accumulation_steps', type=lowerCAmelCase_, default=1 ) parser.add_argument('--freeze', type=lowerCAmelCase_, default=lowerCAmelCase_ ) parser.add_argument('--learning_rate', type=lowerCAmelCase_, default=5E-4 ) parser.add_argument('--seed', type=lowerCAmelCase_, default=0 ) parser.add_argument('--lr_scheduler_type', type=lowerCAmelCase_, default='cosine' ) parser.add_argument('--num_warmup_steps', type=lowerCAmelCase_, default=10 ) parser.add_argument('--weight_decay', type=lowerCAmelCase_, default=0.01 ) parser.add_argument('--output_dir', type=lowerCAmelCase_, default='./results' ) return parser.parse_args() _snake_case : Union[str, Any] = load('accuracy') def a_ ( lowerCAmelCase_ : List[Any] ): __lowerCAmelCase , __lowerCAmelCase = eval_pred __lowerCAmelCase = np.argmax(lowerCAmelCase_, axis=1 ) return metric.compute(predictions=lowerCAmelCase_, references=lowerCAmelCase_ ) class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase_ : List[Any] ) -> None: super().__init__() __lowerCAmelCase = trainer def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[int] ) -> Dict: if control.should_evaluate: __lowerCAmelCase = deepcopy(lowerCAmelCase_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' ) return control_copy def a_ ( ): __lowerCAmelCase = get_args() set_seed(args.seed ) __lowerCAmelCase = load_dataset('codeparrot/codecomplex', split='train' ) __lowerCAmelCase = dataset.train_test_split(test_size=0.2 ) __lowerCAmelCase = train_test['test'].train_test_split(test_size=0.5 ) __lowerCAmelCase = DatasetDict( { 'train': train_test['train'], 'test': test_validation['train'], 'valid': test_validation['test'], } ) print('Loading tokenizer and model' ) __lowerCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) __lowerCAmelCase = tokenizer.eos_token __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 ) __lowerCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): __lowerCAmelCase = False __lowerCAmelCase = ClassLabel(num_classes=7, names=list(set(train_test_validation['train']['complexity'] ) ) ) def tokenize(lowerCAmelCase_ : Union[str, Any] ): __lowerCAmelCase = tokenizer(example['src'], truncation=lowerCAmelCase_, max_length=1024 ) __lowerCAmelCase = labels.straint(example['complexity'] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } __lowerCAmelCase = train_test_validation.map( lowerCAmelCase_, batched=lowerCAmelCase_, remove_columns=train_test_validation['train'].column_names, ) __lowerCAmelCase = DataCollatorWithPadding(tokenizer=lowerCAmelCase_ ) __lowerCAmelCase = TrainingArguments( 
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy='epoch', save_strategy='epoch', logging_strategy='epoch', per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model='accuracy', run_name='complexity-java', report_to='wandb', ) __lowerCAmelCase = Trainer( model=lowerCAmelCase_, args=lowerCAmelCase_, train_dataset=tokenized_datasets['train'], eval_dataset=tokenized_datasets['valid'], tokenizer=lowerCAmelCase_, data_collator=lowerCAmelCase_, compute_metrics=lowerCAmelCase_, ) print('Training...' ) trainer.add_callback(CustomCallback(lowerCAmelCase_ ) ) trainer.train() if __name__ == "__main__": main()
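# The tokenize closure above maps the string complexity class to an integer
# label with datasets.ClassLabel (the sample's obfuscated `labels.straint`
# corresponds to ClassLabel.str2int). A minimal sketch of that mapping in
# isolation; the class names here are illustrative, not CodeComplex's.
from datasets import ClassLabel

labels = ClassLabel(names=["constant", "linear", "quadratic"])
assert labels.str2int("linear") == 1
assert labels.int2str(2) == "quadratic"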
421
0
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
239
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def a ( __UpperCAmelCase : str , __UpperCAmelCase : str = "cpu" , __UpperCAmelCase : Union[str, None] = None ) -> None: __magic_name__: List[Any] = torch.load(__UpperCAmelCase , map_location=__UpperCAmelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(__UpperCAmelCase , torch.Tensor ): raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" ) __magic_name__: Any = v.half() if save_path is None: # overwrite src_path __magic_name__: List[str] = src_path torch.save(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": fire.Fire(convert)
96
0
def is_balanced(s: str) -> bool:
    """Return True if every bracket in s is closed in the right order."""
    stack = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (
            not stack or open_to_closed[stack.pop()] != ch
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
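# Quick sanity checks for is_balanced above; the expected values follow
# directly from the stack algorithm (non-bracket characters are ignored).
for text, expected in [
    ("([]{})", True),
    ("([)]", False),
    ("(((", False),
    ("a(b)c", True),
    ("", True),
]:
    assert is_balanced(text) == expected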
411
'''simple docstring''' lowerCamelCase__ = 2_56 # Modulus to hash a string lowerCamelCase__ = 1_00_00_03 def _SCREAMING_SNAKE_CASE( snake_case_ : str , snake_case_ : str ) ->bool: '''simple docstring''' _lowercase : int = len(snake_case_ ) _lowercase : str = len(snake_case_ ) if p_len > t_len: return False _lowercase : List[str] = 0 _lowercase : Any = 0 _lowercase : Union[str, Any] = 1 # Calculating the hash of pattern and substring of text for i in range(snake_case_ ): _lowercase : List[Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _lowercase : List[str] = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _lowercase : Optional[Any] = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _lowercase : Optional[int] = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _SCREAMING_SNAKE_CASE( ) ->None: '''simple docstring''' _lowercase : List[str] = '''abc1abc12''' _lowercase : int = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' _lowercase : List[str] = '''alskfjaldsk23adsfabcabc''' assert rabin_karp(snake_case_ , snake_case_ ) and not rabin_karp(snake_case_ , snake_case_ ) # Test 2) _lowercase : int = '''ABABX''' _lowercase : Any = '''ABABZABABYABABX''' assert rabin_karp(snake_case_ , snake_case_ ) # Test 3) _lowercase : Tuple = '''AAAB''' _lowercase : Tuple = '''ABAAAAAB''' assert rabin_karp(snake_case_ , snake_case_ ) # Test 4) _lowercase : Dict = '''abcdabcy''' _lowercase : List[Any] = '''abcxabcdabxabcdabcdabcy''' assert rabin_karp(snake_case_ , snake_case_ ) # Test 5) _lowercase : Tuple = '''Lü''' _lowercase : Any = '''Lüsai''' assert rabin_karp(snake_case_ , snake_case_ ) _lowercase : Tuple = '''Lue''' assert not rabin_karp(snake_case_ , snake_case_ ) print('''Success.''' ) if __name__ == "__main__": test_rabin_karp()
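# The heart of the Rabin-Karp routine above is the O(1) rolling-hash update
#   h(i+1) = ((h(i) - ord(t[i]) * R**(m-1)) * R + ord(t[i+m])) mod Q
# isolated below with the sample's constants (alphabet size 256, modulus
# 1000003) and checked against hashes computed from scratch.
R, Q = 256, 1_000_003

def rolling_hashes(text: str, m: int):
    """Yield (start_index, hash) for every length-m window of text in O(n)."""
    h = 0
    for ch in text[:m]:
        h = (h * R + ord(ch)) % Q
    yield 0, h
    pow_m = pow(R, m - 1, Q)  # weight of the outgoing character
    for i in range(len(text) - m):
        h = ((h - ord(text[i]) * pow_m) * R + ord(text[i + m])) % Q
        yield i + 1, h

def direct_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (h * R + ord(ch)) % Q
    return h

text = "alskfjaldsabc1abc1abc12k23adsfabcabc"  # test string from the sample
assert all(h == direct_hash(text[i:i + 3]) for i, h in rolling_hashes(text, 3))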
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib A_ : List[str] = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } A_ : int = logging.WARNING def __snake_case ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = os.getenv('DATASETS_VERBOSITY' , __A ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ F"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def __snake_case ( ) -> Dict: '''simple docstring''' return __name__.split('.' )[0] def __snake_case ( ) -> List[Any]: '''simple docstring''' return logging.getLogger(_get_library_name() ) def __snake_case ( ) -> Optional[Any]: '''simple docstring''' # Apply our default configuration to the library root logger. SCREAMING_SNAKE_CASE : str = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def __snake_case ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def __snake_case ( __A : str = None ) -> Optional[Any]: '''simple docstring''' if name is None: SCREAMING_SNAKE_CASE : Optional[int] = _get_library_name() return logging.getLogger(__A ) def __snake_case ( ) -> str: '''simple docstring''' return _get_library_root_logger().getEffectiveLevel() def __snake_case ( __A : List[str] ) -> Dict: '''simple docstring''' _get_library_root_logger().setLevel(__A ) def __snake_case ( ) -> Any: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> Optional[Any]: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> str: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> List[str]: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : int = False def __snake_case ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase__ : '''simple docstring''' def __init__( self : List[Any] , *_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: # pylint: disable=unused-argument """simple docstring""" SCREAMING_SNAKE_CASE : int = args[0] if args else None def __iter__( self : List[Any] ) -> Tuple: """simple docstring""" return iter(self._iterator ) def __getattr__( self : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" def empty_fn(*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Union[str, Any] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : str ) -> List[str]: """simple docstring""" return self def __exit__( self : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: """simple docstring""" return A_ : str = True class lowerCAmelCase__ : '''simple docstring''' def __call__( self : Any , *_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str]=False , **_SCREAMING_SNAKE_CASE : Any ) -> Union[str, 
Any]: """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) else: return EmptyTqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : List[Any] , *_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : str ) -> Any: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() A_ : Dict = _tqdm_cls() def __snake_case ( ) -> int: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def __snake_case ( ) -> Any: '''simple docstring''' global _tqdm_active SCREAMING_SNAKE_CASE : List[str] = True def __snake_case ( ) -> int: '''simple docstring''' global _tqdm_active SCREAMING_SNAKE_CASE : List[Any] = False
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
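A minimal instantiation sketch (the dimensions are illustrative, not a released checkpoint):

config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=64)
assert config.num_attention_heads == config.n_head  # resolved through attribute_map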
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
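A minimal usage sketch for patch_submodule (the toy module and replacement are made up for illustration):

# Patch os.path.join as seen from a module `m` that did `import os`.
import os
import types

m = types.ModuleType("m")
m.os = os
with patch_submodule(m, "os.path.join", lambda *parts: "/".join(parts)):
    assert m.os.path.join("a", "b") == "a/b"                # patched
assert m.os.path.join("a", "b") == os.path.join("a", "b")  # restored on exit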
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
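An illustrative invocation of the conversion script (the paths are made up):

# python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path ./mlm_en_2048.pth \
#     --pytorch_dump_folder_path ./xlm-converted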
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
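A minimal sketch of a smaller VAN variant (the values are illustrative, not a released checkpoint):

config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
assert len(config.hidden_sizes) == len(config.depths) == 4  # one entry per stage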
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: UpperCamelCase__ : Any = None UpperCamelCase__ : List[str] = logging.get_logger(__name__) UpperCamelCase__ : int = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase__ : Union[str, Any] = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } UpperCamelCase__ : Optional[int] = { """google/bigbird-roberta-base""": 4_096, """google/bigbird-roberta-large""": 4_096, """google/bigbird-base-trivia-itc""": 4_096, } UpperCamelCase__ : Tuple = """▁""" class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = BigBirdTokenizer SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] SCREAMING_SNAKE_CASE_ = [] def __init__( self : Any ,__lowerCamelCase : List[str]=None ,__lowerCamelCase : Union[str, Any]=None ,__lowerCamelCase : Optional[int]="<unk>" ,__lowerCamelCase : Dict="<s>" ,__lowerCamelCase : Tuple="</s>" ,__lowerCamelCase : List[str]="<pad>" ,__lowerCamelCase : Tuple="[SEP]" ,__lowerCamelCase : List[str]="[MASK]" ,__lowerCamelCase : List[Any]="[CLS]" ,**__lowerCamelCase : List[str] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else bos_token a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else eos_token a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else pad_token a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else cls_token a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token super().__init__( __lowerCamelCase ,tokenizer_file=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,**__lowerCamelCase ,) a = vocab_file a = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ,__lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file ,__lowerCamelCase ) return (out_vocab_file,)
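A hedged usage sketch for the fast tokenizer above (downloading the checkpoint assumes network access):

from transformers import BigBirdTokenizerFast

tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
enc = tok("Hello world")  # [CLS] ... [SEP] wrapping comes from build_inputs_with_special_tokens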
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
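A minimal sketch reading back the XPath bookkeeping from the defaults above:

config = MarkupLMConfig()
assert config.max_depth == 50  # deepest XPath the embeddings cover
assert config.xpath_unit_hidden_size == 32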
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
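A short usage sketch of the task template (the column names are illustrative):

task = Summarization(text_column="article", summary_column="highlights")
assert task.column_mapping == {"article": "text", "highlights": "summary"}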
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
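A minimal lookup sketch using the tables above (the file extension is illustrative):

module_name, builder_kwargs = _EXTENSION_TO_MODULE[".tsv"]
assert module_name == "csv" and builder_kwargs == {"sep": "\t"}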
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __magic_name__ ( __UpperCAmelCase ): __A : Tuple = ["image_processor", "tokenizer"] __A : Dict = "BlipImageProcessor" __A : Dict = "AutoTokenizer" def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str ): '''simple docstring''' lowercase :Dict = False super().__init__(snake_case__ , snake_case__ ) lowercase :Union[str, Any] = self.image_processor def __call__( self : Optional[int] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Optional[Any] , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowercase :List[Any] = self.tokenizer lowercase :str = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) return text_encoding # add pixel_values lowercase :Union[str, Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ ) if text is not None: lowercase :int = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) else: lowercase :Optional[int] = None if text_encoding is not None: encoding_image_processor.update(snake_case__ ) return encoding_image_processor def __snake_case ( self : Tuple , *snake_case__ : List[Any] , **snake_case__ : Tuple ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def __snake_case ( self : List[str] , *snake_case__ : Dict , **snake_case__ : List[Any] ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __snake_case ( self : List[Any] ): '''simple docstring''' lowercase :List[Any] = self.tokenizer.model_input_names lowercase :List[Any] = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __magic_name__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') __magic_name__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) __magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a__ : """simple docstring""" A__ : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) A__ : Optional[str] = field( default=_snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) A__ : Optional[str] = field( default=_snake_case , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) A__ : Optional[str] = field(default=_snake_case , metadata={'''help''': '''A folder containing the training data.'''} ) A__ : Optional[str] = field(default=_snake_case , metadata={'''help''': '''A folder containing the validation data.'''} ) A__ : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) A__ : int = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) A__ : float = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) A__ : Optional[int] = field( default=_snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) A__ : Optional[int] = field( default=_snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def __UpperCAmelCase ( self :Tuple ): lowercase = {} if self.train_dir is not None: lowercase = self.train_dir if self.validation_dir is not None: lowercase = self.validation_dir lowercase = data_files if data_files else None @dataclass class a__ : """simple docstring""" A__ : str = field( default=_snake_case , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) A__ : Optional[str] = field( default=_snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_snake_case )} , ) A__ : Optional[str] = field( default=_snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) A__ : Optional[str] = field( default=_snake_case , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) A__ : Optional[str] = field( default=_snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) A__ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) A__ : str = field(default=_snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} ) A__ : bool = field( default=_snake_case , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) A__ : Optional[int] = field( default=_snake_case , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) A__ : Optional[int] = field( default=_snake_case , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) A__ : Optional[int] = field( default=_snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class a__ : """simple docstring""" def __init__( self :Any , lowercase__ :Union[str, Any]=192 , lowercase__ :Tuple=32 , lowercase__ :Optional[int]=4 , lowercase__ :List[str]=0.6 ): lowercase = input_size lowercase = mask_patch_size lowercase = model_patch_size lowercase = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('Input size must be divisible by mask patch size' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('Mask patch size must be divisible by model patch size' ) lowercase = self.input_size // self.mask_patch_size lowercase = self.mask_patch_size // self.model_patch_size lowercase = self.rand_size**2 lowercase = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self :Any ): lowercase = np.random.permutation(self.token_count )[: self.mask_count] lowercase = np.zeros(self.token_count , dtype=lowercase__ ) lowercase = 1 lowercase = mask.reshape((self.rand_size, self.rand_size) ) lowercase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __snake_case ( _UpperCAmelCase ): """simple docstring""" lowercase = torch.stack([example['pixel_values'] for example in examples] ) lowercase = torch.stack([example['mask'] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __snake_case ( ): """simple docstring""" lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mim' , _UpperCAmelCase , _UpperCAmelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. lowercase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. lowercase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _UpperCAmelCase ) and data_args.train_val_split > 0.0: lowercase = ds['train'].train_test_split(data_args.train_val_split ) lowercase = split['train'] lowercase = split['test'] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: lowercase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_UpperCAmelCase ) elif model_args.model_name_or_path: lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase ) else: lowercase = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_UpperCAmelCase , 'decoder_type' ): lowercase = 'simmim' # adapt config lowercase = model_args.image_size if model_args.image_size is not None else config.image_size lowercase = model_args.patch_size if model_args.patch_size is not None else config.patch_size lowercase = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { 'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: lowercase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_UpperCAmelCase ) elif model_args.model_name_or_path: lowercase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase ) else: lowercase = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } lowercase = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: lowercase = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) lowercase = AutoModelForMaskedImageModeling.from_config(_UpperCAmelCase ) if training_args.do_train: lowercase = ds['train'].column_names else: lowercase = ds['validation'].column_names if data_args.image_column_name is not None: lowercase = data_args.image_column_name elif "image" in column_names: lowercase = 'image' elif "img" in column_names: lowercase = 'img' else: lowercase = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py lowercase = Compose( [ Lambda(lambda _UpperCAmelCase : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator lowercase = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_UpperCAmelCase ): lowercase = [transforms(_UpperCAmelCase ) for image in examples[image_column_name]] lowercase = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: lowercase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_UpperCAmelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: lowercase = ( ds['validation'].shuffle(seed=training_args.seed 
).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_UpperCAmelCase ) # Initialize our trainer lowercase = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: lowercase = None if training_args.resume_from_checkpoint is not None: lowercase = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase = last_checkpoint lowercase = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase = trainer.evaluate() trainer.log_metrics('eval' , _UpperCAmelCase ) trainer.save_metrics('eval' , _UpperCAmelCase ) # Write model card and (optionally) push to hub lowercase = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'masked-image-modeling', 'dataset': data_args.dataset_name, 'tags': ['masked-image-modeling'], } if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) if __name__ == "__main__": main()
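An illustrative launch command for the masked-image-modeling script above (the flags map to the dataclass fields; the paths are made up):

# python run_mim.py \
#     --dataset_name cifar10 \
#     --do_train --do_eval \
#     --output_dir ./simmim-outputs \
#     --mask_ratio 0.6 --mask_patch_size 32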
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
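A sketch of what the lazy module buys you (assuming transformers is installed; the attribute is resolved on first access without eagerly importing the torch-heavy modeling code):

from transformers.models.nllb_moe import NllbMoeConfig

config = NllbMoeConfig()  # defaults only, no weights downloaded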
"""simple docstring""" import argparse import os import re __UpperCamelCase : Any = '''src/diffusers''' # Pattern that looks at the indentation in a line. __UpperCamelCase : Any = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. __UpperCamelCase : Union[str, Any] = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __UpperCamelCase : Tuple = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. __UpperCamelCase : Any = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __UpperCamelCase : List[Any] = re.compile(R'''\[([^\]]+)\]''') def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : Any = _re_indent.search(__UpperCamelCase ) return "" if search is None else search.groups()[0] def __SCREAMING_SNAKE_CASE ( A_ , A_="" , A_=None , A_=None ): lowerCAmelCase__ : int = 0 lowerCAmelCase__ : Dict = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(__UpperCamelCase ): index += 1 lowerCAmelCase__ : Optional[int] = ["""\n""".join(lines[:index] )] else: lowerCAmelCase__ : List[str] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCAmelCase__ : Any = [lines[index]] index += 1 while index < len(__UpperCamelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCamelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__UpperCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(__UpperCamelCase ) ) if index < len(__UpperCamelCase ) - 1: lowerCAmelCase__ : Union[str, Any] = [lines[index + 1]] index += 1 else: lowerCAmelCase__ : Optional[int] = [] else: blocks.append('''\n'''.join(__UpperCamelCase ) ) lowerCAmelCase__ : Union[str, Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__UpperCamelCase ) > 0: blocks.append('''\n'''.join(__UpperCamelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__UpperCamelCase ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def __SCREAMING_SNAKE_CASE ( A_ ): def _inner(A_ ): return key(__UpperCamelCase ).lower().replace('''_''' , '''''' ) return _inner def __SCREAMING_SNAKE_CASE ( A_ , A_=None ): def noop(A_ ): return x if key is None: lowerCAmelCase__ : Tuple = noop # Constants are all uppercase, they go first. lowerCAmelCase__ : Dict = [obj for obj in objects if key(__UpperCamelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCAmelCase__ : str = [obj for obj in objects if key(__UpperCamelCase )[0].isupper() and not key(__UpperCamelCase ).isupper()] # Functions begin with a lowercase, they go last. 
lowerCAmelCase__ : int = [obj for obj in objects if not key(__UpperCamelCase )[0].isupper()] lowerCAmelCase__ : Optional[int] = ignore_underscore(__UpperCamelCase ) return sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase ) def __SCREAMING_SNAKE_CASE ( A_ ): def _replace(A_ ): lowerCAmelCase__ : List[Any] = match.groups()[0] if "," not in imports: return f'[{imports}]' lowerCAmelCase__ : Tuple = [part.strip().replace('''\"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase__ : Optional[Any] = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(__UpperCamelCase )] ) + "]" lowerCAmelCase__ : Optional[Any] = import_statement.split('''\n''' ) if len(__UpperCamelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowerCAmelCase__ : Any = 2 if lines[1].strip() == """[""" else 1 lowerCAmelCase__ : Any = [(i, _re_strip_line.search(__UpperCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCAmelCase__ : List[str] = sort_objects(__UpperCamelCase , key=lambda A_ : x[1] ) lowerCAmelCase__ : Union[str, Any] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__UpperCamelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCAmelCase__ : Any = _re_bracket_content.sub(_replace , lines[1] ) else: lowerCAmelCase__ : List[str] = [part.strip().replace('''\"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase__ : Tuple = keys[:-1] lowerCAmelCase__ : List[str] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(__UpperCamelCase )] ) return "\n".join(__UpperCamelCase ) else: # Finally we have to deal with imports fitting on one line lowerCAmelCase__ : Any = _re_bracket_content.sub(_replace , __UpperCamelCase ) return import_statement def __SCREAMING_SNAKE_CASE ( A_ , A_=True ): with open(__UpperCamelCase , '''r''' ) as f: lowerCAmelCase__ : str = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCAmelCase__ : int = split_code_in_indented_blocks( __UpperCamelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__UpperCamelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCAmelCase__ : List[str] = main_blocks[block_idx] lowerCAmelCase__ : List[str] = block.split('''\n''' ) # Get to the start of the imports. lowerCAmelCase__ : int = 0 while line_idx < len(__UpperCamelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCAmelCase__ : int = len(__UpperCamelCase ) else: line_idx += 1 if line_idx >= len(__UpperCamelCase ): continue # Ignore beginning and last line: they don't contain anything. 
lowerCAmelCase__ : Dict = """\n""".join(block_lines[line_idx:-1] ) lowerCAmelCase__ : Tuple = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCAmelCase__ : List[str] = split_code_in_indented_blocks(__UpperCamelCase , indent_level=__UpperCamelCase ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCAmelCase__ : Dict = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowerCAmelCase__ : str = [(pattern.search(__UpperCamelCase ).groups()[0] if pattern.search(__UpperCamelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowerCAmelCase__ : str = [(i, key) for i, key in enumerate(__UpperCamelCase ) if key is not None] lowerCAmelCase__ : int = [x[0] for x in sorted(__UpperCamelCase , key=lambda A_ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCAmelCase__ : int = 0 lowerCAmelCase__ : int = [] for i in range(len(__UpperCamelCase ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: lowerCAmelCase__ : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(__UpperCamelCase ) count += 1 # And we put our main block back together with its first and last line. lowerCAmelCase__ : Any = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(__UpperCamelCase ): if check_only: return True else: print(f'Overwriting {file}.' ) with open(__UpperCamelCase , '''w''' ) as f: f.write('''\n'''.join(__UpperCamelCase ) ) def __SCREAMING_SNAKE_CASE ( A_=True ): lowerCAmelCase__ : Any = [] for root, _, files in os.walk(__UpperCamelCase ): if "__init__.py" in files: lowerCAmelCase__ : Tuple = sort_imports(os.path.join(__UpperCamelCase , '''__init__.py''' ) , check_only=__UpperCamelCase ) if result: lowerCAmelCase__ : List[str] = [os.path.join(__UpperCamelCase , '''__init__.py''' )] if len(__UpperCamelCase ) > 0: raise ValueError(f'Would overwrite {len(__UpperCamelCase )} files, run `make style`.' ) if __name__ == "__main__": __UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') __UpperCamelCase : Dict = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
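Illustrative invocations of the sorting script above (the --check_only flag is defined in the argparse block; the script path is an assumption here):

# python utils/custom_init_isort.py               # rewrite __init__.py files in place
# python utils/custom_init_isort.py --check_only  # raise instead of rewriting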
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCAmelCase : List[Any] = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _lowerCAmelCase = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(4_2) _lowerCAmelCase = """sshleifer/student_marian_en_ro_6_1""" _lowerCAmelCase = """sshleifer/tiny-mbart""" @require_torch class __UpperCamelCase ( a__ ): def __lowerCamelCase ( self ,_A=False ,_A=None ,_A=True ,_A=True ,_A=True ,_A=True ,): '''simple docstring''' _lowerCAmelCase : Any = self.run_trainer( eval_steps=1 ,max_len=12 ,model_name=_A ,num_train_epochs=1 ,distributed=_A ,extra_args_str=_A ,predict_with_generate=_A ,do_train=_A ,do_eval=_A ,do_predict=_A ,) _lowerCAmelCase : Optional[Any] = TrainerState.load_from_json(os.path.join(_A ,'trainer_state.json' ) ).log_history if not do_eval: return _lowerCAmelCase : Tuple = [log for log in logs if 'eval_loss' in log.keys()] _lowerCAmelCase : Union[str, Any] = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats _lowerCAmelCase : List[str] = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] ,_A ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick() @require_torch_multi_gpu def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick(distributed=_A ) @require_torch_multi_gpu def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick(distributed=_A ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick(distributed=_A ,extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick(distributed=_A ,extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick(distributed=_A ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=_A ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick( distributed=_A ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=_A ) @require_apex @require_torch_gpu def __lowerCamelCase ( self ): '''simple docstring''' self.run_seqaseq_quick(distributed=_A ,extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=_A ,extra_args_str='--fp16 --fp16_backend=apex' ) 
@parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } _lowerCAmelCase : int = experiments[experiment_id] _lowerCAmelCase : List[Any] = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} _lowerCAmelCase : Tuple = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**_A ,extra_args_str=data['extra_args_str'] ) _lowerCAmelCase : int = len(re.findall(_A ,cl.err ) ) self.assertEqual(_A ,data['n_matches'] ) @slow def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.run_trainer( eval_steps=2 ,max_len=128 ,model_name=_A ,learning_rate=3E-4 ,num_train_epochs=10 ,distributed=_A ,) # Check metrics _lowerCAmelCase : Tuple = TrainerState.load_from_json(os.path.join(_A ,'trainer_state.json' ) ).log_history _lowerCAmelCase : Union[str, Any] = [log for log in logs if 'eval_loss' in log.keys()] _lowerCAmelCase : int = eval_metrics[0] _lowerCAmelCase : List[str] = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] ,_A ) # test if do_predict saves generations and metrics _lowerCAmelCase : str = os.listdir(_A ) _lowerCAmelCase : Dict = {os.path.basename(_A ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def __lowerCamelCase ( self ): '''simple docstring''' from transformers.training_args import OptimizerNames def train_and_return_metrics(_A ) -> Tuple[int, float]: _lowerCAmelCase : List[str] = '--skip_memory_metrics 0' _lowerCAmelCase : str = self.run_trainer( max_len=128 ,model_name=_A ,learning_rate=3E-4 ,num_train_epochs=1 ,optim=_A ,distributed=_A ,extra_args_str=_A ,do_eval=_A ,do_predict=_A ,n_gpus_to_use=1 ,) # Check metrics _lowerCAmelCase : str = TrainerState.load_from_json(Path(_A ,'trainer_state.json' ) ).log_history _lowerCAmelCase : List[Any] = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) _lowerCAmelCase : List[str] = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) _lowerCAmelCase : Optional[Any] = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : str = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) _lowerCAmelCase : Union[str, Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb _lowerCAmelCase : List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig _lowerCAmelCase : List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb _lowerCAmelCase : List[str] = gpu_total_mem_orig - 
gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings _lowerCAmelCase : Any = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( _A ,_A ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" ,) self.assertGreater( _A ,_A ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" ,) self.assertEqual( _A ,_A ,F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = 3E-3 ,_A = "adafactor" ,_A = False ,_A = None ,_A = 0 ,_A = True ,_A = True ,_A = True ,_A = True ,_A = None ,): '''simple docstring''' _lowerCAmelCase : int = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' _lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir() _lowerCAmelCase : Tuple = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(_A )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(_A )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() _lowerCAmelCase : Dict = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(_A )} """.split() _lowerCAmelCase : List[str] = '\n --do_predict\n '.split() _lowerCAmelCase : int = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: _lowerCAmelCase : List[Any] = get_gpu_count() _lowerCAmelCase : List[str] = get_torch_dist_unique_port() 
_lowerCAmelCase : Tuple = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() _lowerCAmelCase : Optional[int] = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_A ,env=self.get_env() ) else: _lowerCAmelCase : Optional[Any] = ['run_translation.py'] + args with patch.object(_A ,'argv' ,_A ): main() return output_dir
16
"""simple docstring""" import argparse import struct import unittest class __UpperCamelCase : def __init__( self ,_A ): '''simple docstring''' _lowerCAmelCase : Optional[int] = data # Initialize hash values _lowerCAmelCase : Any = [ 0x6A09_E667, 0xBB67_AE85, 0x3C6E_F372, 0xA54F_F53A, 0x510E_527F, 0x9B05_688C, 0x1F83_D9AB, 0x5BE0_CD19, ] # Initialize round constants _lowerCAmelCase : str = [ 0x428A_2F98, 0x7137_4491, 0xB5C0_FBCF, 0xE9B5_DBA5, 0x3956_C25B, 0x59F1_11F1, 0x923F_82A4, 0xAB1C_5ED5, 0xD807_AA98, 0x1283_5B01, 0x2431_85BE, 0x550C_7DC3, 0x72BE_5D74, 0x80DE_B1FE, 0x9BDC_06A7, 0xC19B_F174, 0xE49B_69C1, 0xEFBE_4786, 0x0FC1_9DC6, 0x240C_A1CC, 0x2DE9_2C6F, 0x4A74_84AA, 0x5CB0_A9DC, 0x76F9_88DA, 0x983E_5152, 0xA831_C66D, 0xB003_27C8, 0xBF59_7FC7, 0xC6E0_0BF3, 0xD5A7_9147, 0x06CA_6351, 0x1429_2967, 0x27B7_0A85, 0x2E1B_2138, 0x4D2C_6DFC, 0x5338_0D13, 0x650A_7354, 0x766A_0ABB, 0x81C2_C92E, 0x9272_2C85, 0xA2BF_E8A1, 0xA81A_664B, 0xC24B_8B70, 0xC76C_51A3, 0xD192_E819, 0xD699_0624, 0xF40E_3585, 0x106A_A070, 0x19A4_C116, 0x1E37_6C08, 0x2748_774C, 0x34B0_BCB5, 0x391C_0CB3, 0x4ED8_AA4A, 0x5B9C_CA4F, 0x682E_6FF3, 0x748F_82EE, 0x78A5_636F, 0x84C8_7814, 0x8CC7_0208, 0x90BE_FFFA, 0xA450_6CEB, 0xBEF9_A3F7, 0xC671_78F2, ] _lowerCAmelCase : Any = self.preprocessing(self.data ) self.final_hash() @staticmethod def __lowerCamelCase ( _A ): '''simple docstring''' _lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64)) _lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) ) return data + padding + big_endian_integer def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers _lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) ) # add 48 0-ed integers words += [0] * 48 _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array _lowerCAmelCase : List[str] = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) _lowerCAmelCase : Tuple = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) _lowerCAmelCase : str = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0000_0000 # Compression _lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 ) _lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g) _lowerCAmelCase : int = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0000_0000 _lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 ) _lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c) _lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000 _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = ( g, f, e, ((d + tempa) % 0x1_0000_0000), c, b, a, ((tempa + tempa) % 0x1_0000_0000), ) _lowerCAmelCase : Any = [a, b, c, d, e, f, g, h] # Modify final values _lowerCAmelCase : int = [ ((element + mutated_hash_values[index]) % 0x1_0000_0000) for index, element in enumerate(self.hashes ) ] _lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] ) def __lowerCamelCase ( self ,_A ,_A ): '''simple docstring''' return 
0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations) class __UpperCamelCase ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' import hashlib _lowerCAmelCase : Any = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() ) def lowerCamelCase__ ( ): '''simple docstring''' import doctest doctest.testmod() _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) _lowerCAmelCase : Tuple = parser.parse_args() _lowerCAmelCase : List[str] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: _lowerCAmelCase : int = f.read() else: _lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' ) print(SHAaaa(_lowerCamelCase ).hash ) if __name__ == "__main__": main()
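# Quick sanity check for the class above (mirrors its unittest): the
# pure-Python digest should agree with hashlib for arbitrary byte strings.
import hashlib

payload = b"The quick brown fox jumps over the lazy dog"
assert SHA256(payload).hash == hashlib.sha256(payload).hexdigest()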
16
1
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _a = get_logger(__name__) class _UpperCAmelCase: lowercase__ = """dummy_data""" lowercase__ = """datasets""" lowercase__ = False def __init__( self , __a , __a , __a , __a = None , __a = False , __a = True , __a = None , ) -> int: '''simple docstring''' _UpperCamelCase = 0 _UpperCamelCase = dataset_name _UpperCamelCase = cache_dir _UpperCamelCase = use_local_dummy_data _UpperCamelCase = config # download_callbacks take a single url as input _UpperCamelCase = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _UpperCamelCase = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _UpperCamelCase = str(lowerCAmelCase_) # to be downloaded _UpperCamelCase = None _UpperCamelCase = None @property def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' if self._dummy_file is None: _UpperCamelCase = self.download_dummy_data() return self._dummy_file @property def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name) @property def UpperCAmelCase ( self) -> Any: '''simple docstring''' return os.path.join(self.dummy_data_folder , '''dummy_data.zip''') def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _UpperCamelCase = cached_path( lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_) return os.path.join(lowerCAmelCase_ , self.dummy_file_name) @property def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file) @property def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' if self._bucket_url is None: _UpperCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''')) return self._bucket_url @property def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' if os.path.isdir(self.dummy_file): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '''/''').split('''/''')[:-1]) def UpperCAmelCase ( self , __a , *__a) -> str: '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _UpperCamelCase = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _UpperCamelCase = self.dummy_file_name # special case when data_url is a dict if isinstance(lowerCAmelCase_ , lowerCAmelCase_): return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_) elif isinstance(lowerCAmelCase_ , (list, tuple)): return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_) else: return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_) def UpperCAmelCase ( self , __a , *__a) -> Optional[int]: '''simple docstring''' return self.download_and_extract(lowerCAmelCase_) def UpperCAmelCase ( self , __a , __a) -> Optional[Any]: '''simple docstring''' return self.download_and_extract(lowerCAmelCase_) def UpperCAmelCase ( self , __a , *__a , **__a) -> Optional[Any]: '''simple docstring''' return path def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' return {} def UpperCAmelCase ( self , __a , __a) -> Tuple: '''simple docstring''' _UpperCamelCase = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowerCAmelCase_ , lowerCAmelCase_): for single_url in single_urls: download_callback(lowerCAmelCase_) else: _UpperCamelCase = single_urls download_callback(lowerCAmelCase_) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowerCAmelCase_ , lowerCAmelCase_): _UpperCamelCase = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_).name)) for x in single_urls] else: _UpperCamelCase = single_urls _UpperCamelCase = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_).name)) _UpperCamelCase = value # make sure that values are unique if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( dummy_data_dict.values()): # append key to value to make its name unique _UpperCamelCase = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCAmelCase ( self , __a , __a) -> str: '''simple docstring''' _UpperCamelCase = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _UpperCamelCase = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , lowerCAmelCase_)) for url in data_url) _UpperCamelCase = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''') for url in data_url) if data_url and (is_tf_records or is_pubmed_records): _UpperCamelCase = [data_url[0]] * len(lowerCAmelCase_) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _UpperCamelCase = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split('''/''')[-1])) dummy_data_list.append(lowerCAmelCase_) return dummy_data_list def UpperCAmelCase ( self , __a , __a) -> Optional[int]: '''simple docstring''' for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_) # we force the name of each key to be the 
last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _UpperCamelCase = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split('''/''')[-1])) if os.path.exists(lowerCAmelCase_) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' pass def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' pass def UpperCAmelCase ( self , __a) -> List[Any]: '''simple docstring''' def _iter_archive_members(__a): # this preserves the order of the members inside the ZIP archive _UpperCamelCase = Path(self.dummy_file).parent _UpperCamelCase = path.relative_to(lowerCAmelCase_) with ZipFile(self.local_path_to_dummy_data) as zip_file: _UpperCamelCase = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix()): yield dummy_parent_path.joinpath(lowerCAmelCase_) _UpperCamelCase = Path(lowerCAmelCase_) _UpperCamelCase = _iter_archive_members(lowerCAmelCase_) if self.use_local_dummy_data else path.rglob('''*''') for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''')): yield file_path.relative_to(lowerCAmelCase_).as_posix(), file_path.open('''rb''') def UpperCAmelCase ( self , __a) -> Optional[int]: '''simple docstring''' if not isinstance(lowerCAmelCase_ , lowerCAmelCase_): _UpperCamelCase = [paths] for path in paths: if os.path.isfile(lowerCAmelCase_): if os.path.basename(lowerCAmelCase_).startswith(('''.''', '''__''')): return yield path else: for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_): if os.path.basename(lowerCAmelCase_).startswith(('''.''', '''__''')): continue dirnames.sort() for filename in sorted(lowerCAmelCase_): if filename.startswith(('''.''', '''__''')): continue yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_)
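# Standalone sketch of the file-naming rule used by create_dummy_data_dict /
# create_dummy_data_list above: each dummy file is keyed by the quoted last
# path segment of its URL. The URL and dummy root here are illustrative.
import os
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.csv?raw=true"
dummy_root = "dummy/1.0.0/dummy_data"
dummy_path = os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name))
print(dummy_path)  # dummy/1.0.0/dummy_data/train.csv%3Fraw%3Dtrue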
19
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
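# The import spelling the deprecation message above points to; this is the
# supported path going forward (requires a Flax install).
from diffusers import FlaxStableDiffusionControlNetPipeline  # noqa: F401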
393
0
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
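# Drop-in replacement suggested by the warning above; the checkpoint name is
# an illustrative example of a public YOLOS model.
from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")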
607
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ) -> str: snake_case_ = tf.convert_to_tensor( [ [ 8.2_220_991, # 3rd highest value; idx. 0 -0.5_620_044, 5.23_229_752, 4.0_386_393, -6.8_798_378, -0.54_785_802, -3.2_012_153, 2.92_777_176, 1.88_171_953, 7.35_341_276, # 5th highest value; idx. 9 8.43_207_833, # 2nd highest value; idx. 10 -9.85_711_836, -5.96_209_236, -1.13_039_161, -7.1_115_294, -0.8_369_633, -5.3_186_408, 7.06_427_407, 0.81_369_344, -0.82_023_817, -5.9_179_796, 0.58_813_443, -6.99_778_438, 4.71_551_189, -0.18_771_637, 7.44_020_759, # 4th highest value; idx. 25 9.38_450_987, # 1st highest value; idx. 26 2.12_662_941, -9.32_562_038, 2.35_652_522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_425_518, 4.53_139_238, -5.57_510_464, -6.28_030_699, -7.19_529_503, -4.02_122_551, 1.39_337_037, -6.06_707_057, 1.59_480_517, -9.643_119, 0.03_907_799, 0.67_231_762, -8.88_206_726, 6.27_115_922, # 4th highest value; idx. 13 2.28_520_723, 4.82_767_506, 4.30_421_368, 8.8_275_313, # 2nd highest value; idx. 17 5.44_029_958, # 5th highest value; idx. 18 -4.4_735_794, 7.38_579_536, # 3rd highest value; idx. 20 -2.91_051_663, 2.61_946_077, -2.5_674_762, -9.48_959_302, -4.02_922_645, -1.35_416_918, 9.67_702_323, # 1st highest value; idx. 
27 -5.89_478_553, 1.85_370_467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) snake_case_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above snake_case_ = tf.convert_to_tensor( [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above snake_case_ = tf_top_k_top_p_filtering(a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) snake_case_ = output[output != -float('inf' )] snake_case_ = tf.cast( tf.where(tf.not_equal(a , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(a , a , rtol=1E-12 ) tf.debugging.assert_equal(a , a ) @require_tf class UpperCamelCase_ ( unittest.TestCase , snake_case_ ): '''simple docstring''' if is_tf_available(): lowerCAmelCase = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def _UpperCamelCase ( self ) -> Optional[int]: # TF-only test: tf.saved_model export snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case_ = 2 snake_case_ = 2 class UpperCamelCase_ ( tf.Module ): '''simple docstring''' def __init__( self , a ) -> Any: super(a , self ).__init__() snake_case_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ), ) , jit_compile=a , ) def _UpperCamelCase ( self , a , a ) -> Optional[Any]: snake_case_ = self.model.generate( input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , ) return {"sequences": outputs["sequences"]} snake_case_ = [[2, 0], [1_02, 1_03]] snake_case_ = [[1, 0], [1, 1]] snake_case_ = DummyModel(model=a ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(a , a , signatures={'serving_default': dummy_model.serving} ) snake_case_ = tf.saved_model.load(a ).signatures['serving_default'] for batch_size in range(1 , len(a ) + 1 ): snake_case_ = { 'input_ids': tf.constant(dummy_input_ids[:batch_size] ), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ), } snake_case_ = serving_func(**a )['sequences'] snake_case_ = test_model.generate(**a , max_new_tokens=a ) tf.debugging.assert_equal(a , a ) @slow def _UpperCamelCase ( self ) -> Dict: # TF-only test: tf.saved_model export snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case_ = 1 snake_case_ = 2 class UpperCamelCase_ ( tf.Module ): '''simple docstring''' def __init__( self , a ) -> int: super(a , self ).__init__() snake_case_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ), ) , jit_compile=a , ) def _UpperCamelCase ( self , a , a ) -> Union[str, Any]: snake_case_ = self.model.generate( input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , ) return 
{"sequences": outputs["sequences"]} snake_case_ = [[2], [1_02, 1_03]] snake_case_ = [[1], [1, 1]] snake_case_ = DummyModel(model=a ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(a , a , signatures={'serving_default': dummy_model.serving} ) snake_case_ = tf.saved_model.load(a ).signatures['serving_default'] for input_row in range(len(a ) ): snake_case_ = { 'input_ids': tf.constant([dummy_input_ids[input_row]] ), 'attention_mask': tf.constant([dummy_attention_masks[input_row]] ), } snake_case_ = serving_func(**a )['sequences'] snake_case_ = test_model.generate(**a , max_new_tokens=a ) tf.debugging.assert_equal(a , a ) @slow @require_tensorflow_text def _UpperCamelCase ( self ) -> Any: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=a ) class UpperCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self ) -> Any: super().__init__() snake_case_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(a , 'spiece.model' ) , 'rb' ).read() ) snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' ) def _UpperCamelCase ( self , a , *a , **a ) -> int: snake_case_ = self.tokenizer.tokenize(a ) snake_case_ , snake_case_ = text.pad_model_inputs( a , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) snake_case_ = self.model.generate(input_ids=a , attention_mask=a ) return self.tokenizer.detokenize(a ) snake_case_ = CompleteSentenceTransformer() snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' ) snake_case_ = complete_model(a ) snake_case_ = tf.keras.Model(a , a ) keras_model.save(a ) def _UpperCamelCase ( self ) -> Union[str, Any]: # Has PT equivalent: this test relies on random sampling snake_case_ = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } snake_case_ = 14 snake_case_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case_ = 'Hello, my dog is cute and' snake_case_ = tokenizer(a , return_tensors='tf' ) snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) snake_case_ = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) snake_case_ = model.generate(**a , eos_token_id=a , **a ) self.assertTrue(expectation == len(generated_tokens[0] ) ) snake_case_ = [6_38, 1_98] with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) snake_case_ = model.generate(**a , eos_token_id=a , **a ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _UpperCamelCase ( self ) -> Any: # Has PT equivalent: ample use of framework-specific code snake_case_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' ) snake_case_ = 'Hugging Face is a technology company based in New York and Paris.' 
snake_case_ = bart_tokenizer(a , return_tensors='tf' ).input_ids snake_case_ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' ) snake_case_ = bart_model.generate(a ).numpy() class UpperCamelCase_ ( snake_case_ ): '''simple docstring''' def _UpperCamelCase ( self , a , a=None , **a ) -> List[str]: return super().call(a , **a ) snake_case_ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' ) snake_case_ = bart_model.generate(a , foo='bar' ).numpy() self.assertTrue(np.array_equal(a , a ) ) class UpperCamelCase_ ( bart_model.model.encoder.__class__ ): '''simple docstring''' def _UpperCamelCase ( self , a , **a ) -> List[Any]: return super().call(a , **a ) snake_case_ = FakeEncoder(bart_model.config , bart_model.model.shared ) snake_case_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) snake_case_ = bart_model.generate(a ).numpy() with self.assertRaises(a ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(a , foo='bar' )
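# Minimal sketch of the filtering helper exercised by the first test above:
# logits outside the top-k / nucleus-p set are pushed to -inf. Requires a TF
# install; the random logits are placeholders.
import tensorflow as tf
from transformers import tf_top_k_top_p_filtering

logits = tf.random.normal((2, 30))
filtered = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)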
607
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class __lowerCAmelCase ( _UpperCAmelCase ): """simple docstring""" def __init__( self : Tuple , **_lowerCAmelCase : str ) -> List[str]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , "vision" ) self.check_model_type(SCREAMING_SNAKE_CASE_ ) def __call__( self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple = None , **_lowerCAmelCase : List[str] , ) -> Optional[Any]: """simple docstring""" if "text_queries" in kwargs: snake_case_ = kwargs.pop("text_queries" ) if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image) ): snake_case_ = {'image': image, 'candidate_labels': candidate_labels} else: snake_case_ = image snake_case_ = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return results def lowerCAmelCase__ ( self : Optional[int] , **_lowerCAmelCase : Union[str, Any] ) -> List[Any]: """simple docstring""" snake_case_ = {} if "threshold" in kwargs: snake_case_ = kwargs['threshold'] if "top_k" in kwargs: snake_case_ = kwargs['top_k'] return {}, {}, postprocess_params def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" snake_case_ = load_image(inputs["image"] ) snake_case_ = inputs['candidate_labels'] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): snake_case_ = candidate_labels.split("," ) snake_case_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_ ): snake_case_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) snake_case_ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Any: """simple docstring""" snake_case_ = model_inputs.pop("target_size" ) snake_case_ = model_inputs.pop("candidate_label" ) snake_case_ = model_inputs.pop("is_last" ) snake_case_ = self.model(**SCREAMING_SNAKE_CASE_ ) snake_case_ = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs} return model_outputs def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[Any]=None ) -> Tuple: """simple docstring""" snake_case_ = [] for model_output in model_outputs: snake_case_ = model_output['candidate_label'] snake_case_ = BaseModelOutput(SCREAMING_SNAKE_CASE_ ) snake_case_ = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): snake_case_ = outputs['scores'][index].item() snake_case_ = 
self._get_bounding_box(outputs["boxes"][index][0] ) snake_case_ = {'score': score, 'label': label, 'box': box} results.append(SCREAMING_SNAKE_CASE_ ) snake_case_ = sorted(SCREAMING_SNAKE_CASE_ , key=lambda _lowerCAmelCase : x["score"] , reverse=SCREAMING_SNAKE_CASE_ ) if top_k: snake_case_ = results[:top_k] return results def lowerCAmelCase__ ( self : str , _lowerCAmelCase : List[Any] ) -> Dict[str, int]: """simple docstring""" if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." ) snake_case_ = box.int().tolist() snake_case_ = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
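# Usage sketch for the pipeline above via the high-level factory; the
# checkpoint and image path are illustrative assumptions.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector("street.jpg", candidate_labels=["car", "bicycle", "person"])
# each entry: {"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}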
283
'''simple docstring'''
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
13
0
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase ( snake_case_ ): def __init__( self ,__UpperCamelCase = "▁" ,__UpperCamelCase = True ,__UpperCamelCase = "<unk>" ,__UpperCamelCase = "</s>" ,__UpperCamelCase = "<pad>" ,) -> Any: '''simple docstring''' lowercase_ : str = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } lowercase_ : List[str] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowercase_ : Dict = token_dict['token'] lowercase_ : Tuple = Tokenizer(Unigram() ) lowercase_ : Tuple = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}' ) ,' ' ), normalizers.Lowercase(), ] ) lowercase_ : Dict = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ), pre_tokenizers.Digits(individual_digits=__UpperCamelCase ), pre_tokenizers.Punctuation(), ] ) lowercase_ : Any = decoders.Metaspace(replacement=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ) lowercase_ : List[Any] = TemplateProcessing( single=f'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] ,) lowercase_ : int = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = 8000 ,__UpperCamelCase = True ,) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = trainers.UnigramTrainer( vocab_size=__UpperCamelCase ,special_tokens=self.special_tokens_list ,show_progress=__UpperCamelCase ,) if isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : str = [files] self._tokenizer.train(__UpperCamelCase ,trainer=__UpperCamelCase ) self.add_unk_id() def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = 8000 ,__UpperCamelCase = True ,) -> List[Any]: '''simple docstring''' lowercase_ : str = trainers.UnigramTrainer( vocab_size=__UpperCamelCase ,special_tokens=self.special_tokens_list ,show_progress=__UpperCamelCase ,) self._tokenizer.train_from_iterator(__UpperCamelCase ,trainer=__UpperCamelCase ) self.add_unk_id() def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = json.loads(self._tokenizer.to_str() ) lowercase_ : List[Any] = self.special_tokens['unk']['id'] lowercase_ : Tuple = Tokenizer.from_str(json.dumps(__UpperCamelCase ) )
701
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class UpperCamelCase ( lowercase_ ): lowercase = 'wav2vec2' def __init__( self ,__UpperCamelCase=32 ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-5 ,__UpperCamelCase="group" ,__UpperCamelCase="gelu" ,__UpperCamelCase=(512, 512, 512, 512, 512, 512, 512) ,__UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) ,__UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) ,__UpperCamelCase=False ,__UpperCamelCase=128 ,__UpperCamelCase=16 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=0.05 ,__UpperCamelCase=10 ,__UpperCamelCase=2 ,__UpperCamelCase=0.0 ,__UpperCamelCase=10 ,__UpperCamelCase=0 ,__UpperCamelCase=320 ,__UpperCamelCase=2 ,__UpperCamelCase=0.1 ,__UpperCamelCase=100 ,__UpperCamelCase=256 ,__UpperCamelCase=256 ,__UpperCamelCase=0.1 ,__UpperCamelCase="sum" ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=256 ,__UpperCamelCase=(512, 512, 512, 512, 1500) ,__UpperCamelCase=(5, 3, 3, 1, 1) ,__UpperCamelCase=(1, 2, 3, 1, 1) ,__UpperCamelCase=512 ,__UpperCamelCase=0 ,__UpperCamelCase=1 ,__UpperCamelCase=2 ,__UpperCamelCase=False ,__UpperCamelCase=3 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' super().__init__(**__UpperCamelCase ,pad_token_id=__UpperCamelCase ,bos_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ) lowercase_ : Optional[Any] = hidden_size lowercase_ : Tuple = feat_extract_norm lowercase_ : Dict = feat_extract_activation lowercase_ : List[str] = list(__UpperCamelCase ) lowercase_ : str = list(__UpperCamelCase ) lowercase_ : Dict = list(__UpperCamelCase ) lowercase_ : Optional[Any] = conv_bias lowercase_ : Dict = num_conv_pos_embeddings lowercase_ : List[str] = num_conv_pos_embedding_groups lowercase_ : Optional[Any] = len(self.conv_dim ) lowercase_ : Any = num_hidden_layers lowercase_ : List[Any] = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = num_attention_heads lowercase_ : int = hidden_dropout lowercase_ : Dict = attention_dropout lowercase_ : Union[str, Any] = activation_dropout lowercase_ : Tuple = feat_proj_dropout lowercase_ : List[str] = final_dropout lowercase_ : Union[str, Any] = layerdrop lowercase_ : List[str] = layer_norm_eps lowercase_ : Optional[int] = initializer_range lowercase_ : List[Any] = vocab_size lowercase_ : Optional[int] = do_stable_layer_norm lowercase_ : Union[str, Any] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase_ : Dict = apply_spec_augment lowercase_ : Optional[int] = mask_time_prob lowercase_ : Union[str, Any] = mask_time_length lowercase_ : List[str] = mask_time_min_masks lowercase_ : List[str] = mask_feature_prob lowercase_ : Any = mask_feature_length lowercase_ : List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase_ : List[Any] = num_codevectors_per_group lowercase_ : Optional[int] = num_codevector_groups lowercase_ : Dict = contrastive_logits_temperature lowercase_ : int = feat_quantizer_dropout lowercase_ : Optional[int] = num_negatives lowercase_ : str = codevector_dim lowercase_ : str = proj_codevector_dim lowercase_ : Optional[Any] = diversity_loss_weight # ctc loss lowercase_ : Tuple = ctc_loss_reduction lowercase_ : int = ctc_zero_infinity # adapter lowercase_ : int = add_adapter lowercase_ : Dict = adapter_kernel_size lowercase_ : List[str] = adapter_stride lowercase_ : Dict = num_adapter_layers lowercase_ : Dict = output_hidden_size or hidden_size lowercase_ : Optional[Any] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase_ : Dict = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase_ : Any = list(__UpperCamelCase ) lowercase_ : str = list(__UpperCamelCase ) lowercase_ : Any = list(__UpperCamelCase ) lowercase_ : Tuple = xvector_output_dim @property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return functools.reduce(operator.mul ,self.conv_stride ,1 )
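# Small usage sketch for the configuration above: instantiating it with the
# defaults and reading back a derived field (7 conv layers from conv_dim).
from transformers import Wav2Vec2Config

config = Wav2Vec2Config()
print(config.hidden_size, config.num_feat_extract_layers)  # 768 7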
477
0