Dataset schema (each record below is shown in column order: code, code_codestyle, style_context, style_context_codestyle, label):

  code                      string, 87 to 55.2k characters
  code_codestyle            int64, 0 to 349
  style_context             string, 135 to 49.1k characters
  style_context_codestyle   int64, 0 to 349
  label                     int64, 0 or 1
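Records with this schema are easier to inspect programmatically than by reading the raw dump. Below is a minimal, illustrative sketch (not part of the dataset itself) of how rows with these fields could be loaded and summarized with the Hugging Face `datasets` library; the assumption that the rows sit in a local JSON Lines file named rows.jsonl is hypothetical.

# Illustrative sketch only. Assumes the records are stored locally as JSON Lines
# ("rows.jsonl" is a hypothetical path) with the fields code, code_codestyle,
# style_context, style_context_codestyle and label.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("json", data_files="rows.jsonl", split="train")

print(ds.features)          # column names and inferred types
print(len(ds), "rows")

# Distribution of the binary label and the most frequent code-style ids (0-349).
print("labels:", Counter(ds["label"]))
print("code styles:", Counter(ds["code_codestyle"]).most_common(5))

# Inspect one record: the code sample and its paired style context.
row = ds[0]
print(row["code"][:200])
print(row["style_context"][:200])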
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Any=13 , _UpperCAmelCase : str=32 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : int=4 , _UpperCAmelCase : List[Any]=[10, 20, 30, 40] , _UpperCAmelCase : Any=[2, 2, 3, 2] , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Tuple=37 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[Any]=10 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : Dict=["stage2", "stage3", "stage4"] , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Any=None , ): """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = image_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = num_stages UpperCAmelCase__ = hidden_sizes UpperCAmelCase__ = depths UpperCAmelCase__ = is_training UpperCAmelCase__ = use_labels UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = type_sequence_label_size UpperCAmelCase__ = initializer_range UpperCAmelCase__ = out_features UpperCAmelCase__ = num_labels UpperCAmelCase__ = scope UpperCAmelCase__ = num_stages def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=_UpperCAmelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = UperNetForSemanticSegmentation(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, 
self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = config_and_inputs UpperCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Any = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowerCAmelCase_ : str = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {} lowerCAmelCase_ : Union[str, Any] = False lowerCAmelCase_ : int = False lowerCAmelCase_ : str = False lowerCAmelCase_ : Optional[Any] = False lowerCAmelCase_ : Any = False lowerCAmelCase_ : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = UperNetModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(_UpperCAmelCase ) UpperCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ = [*signature.parameters.keys()] UpperCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""" ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""UperNet does not have a base model""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(_UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase 
: List[str] ): UpperCAmelCase__ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): UpperCAmelCase__ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase__ = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = _config_zero_init(_UpperCAmelCase ) UpperCAmelCase__ = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" pass @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = UperNetForSemanticSegmentation.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) UpperCAmelCase__ = Image.open(SCREAMING_SNAKE_CASE__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) UpperCAmelCase__ = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_UpperCAmelCase ) UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase ) with torch.no_grad(): UpperCAmelCase__ = model(**_UpperCAmelCase ) UpperCAmelCase__ = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) UpperCAmelCase__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) UpperCAmelCase__ = 
UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_UpperCAmelCase ) UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase ) with torch.no_grad(): UpperCAmelCase__ = model(**_UpperCAmelCase ) UpperCAmelCase__ = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) UpperCAmelCase__ = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
code_codestyle: 346
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
style_context_codestyle: 346
label: 1
'''simple docstring''' def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] UpperCAmelCase__ = 6 UpperCAmelCase__ = 1 UpperCAmelCase__ = 1901 UpperCAmelCase__ = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 UpperCAmelCase__ = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 UpperCAmelCase__ = day - 29 else: if day > days_per_month[month - 1]: month += 1 UpperCAmelCase__ = day - days_per_month[month - 2] if month > 12: year += 1 UpperCAmelCase__ = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
code_codestyle: 346
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
style_context_codestyle: 346
label: 1
'''simple docstring''' from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class lowerCAmelCase_ : '''simple docstring''' pass
code_codestyle: 346
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
style_context_codestyle: 346
label: 1
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Dict = ["""image_processor""", """tokenizer"""] lowerCAmelCase_ : Union[str, Any] = """OwlViTImageProcessor""" lowerCAmelCase_ : Union[str, Any] = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _UpperCAmelCase , ) UpperCAmelCase__ = kwargs.pop("""feature_extractor""" ) UpperCAmelCase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__( self : List[Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Any="max_length" , _UpperCAmelCase : Dict="np" , **_UpperCAmelCase : List[Any] ): """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )): UpperCAmelCase__ = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )] elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ): UpperCAmelCase__ = [] # Maximum number of queries across batch UpperCAmelCase__ = max([len(_UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_UpperCAmelCase ) != max_num_queries: UpperCAmelCase__ = t + [""" """] * (max_num_queries - len(_UpperCAmelCase )) UpperCAmelCase__ = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) encodings.append(_UpperCAmelCase ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": UpperCAmelCase__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) UpperCAmelCase__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp UpperCAmelCase__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) UpperCAmelCase__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch UpperCAmelCase__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) UpperCAmelCase__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import 
tensorflow as tf UpperCAmelCase__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) UpperCAmelCase__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) UpperCAmelCase__ = BatchEncoding() UpperCAmelCase__ = input_ids UpperCAmelCase__ = attention_mask if query_images is not None: UpperCAmelCase__ = BatchEncoding() UpperCAmelCase__ = self.image_processor( _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values UpperCAmelCase__ = query_pixel_values if images is not None: UpperCAmelCase__ = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None and images is not None: UpperCAmelCase__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: UpperCAmelCase__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Any ): """simple docstring""" return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : str ): """simple docstring""" return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Dict ): """simple docstring""" return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any , *_UpperCAmelCase : str , **_UpperCAmelCase : Dict ): """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *_UpperCAmelCase : str , **_UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _UpperCAmelCase , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _UpperCAmelCase , ) return self.image_processor
code_codestyle: 346
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
style_context_codestyle: 346
label: 1
'''simple docstring''' def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' assert ( isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and number_of_steps > 0 ), F'''number_of_steps needs to be positive integer, your input {number_of_steps}''' if number_of_steps == 1: return 1 UpperCAmelCase__ , UpperCAmelCase__ = 1, 1 for _ in range(number_of_steps - 1 ): UpperCAmelCase__ , UpperCAmelCase__ = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 346
'''simple docstring''' from timeit import timeit UpperCAmelCase_ = { 'MALAYALAM': True, 'String': False, 'rotor': True, 'level': True, 'A': True, 'BB': True, 'ABC': False, 'amanaplanacanalpanama': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = 0 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) // 2 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE__ ) <= 2: return True if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return s == s[::-1] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())''' UpperCAmelCase__ = F'''from __main__ import test_data, {name}''' UpperCAmelCase__ = 500000 UpperCAmelCase__ = timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print('a man a plan a canal panama') # finished 500,000 runs in 0.46793 seconds benchmark_function('is_palindrome_slice') # finished 500,000 runs in 0.85234 seconds benchmark_function('is_palindrome') # finished 500,000 runs in 1.32028 seconds benchmark_function('is_palindrome_recursive') # finished 500,000 runs in 2.08679 seconds benchmark_function('is_palindrome_traversal')
style_context_codestyle: 346
label: 1
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=99 , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : str=32 , _UpperCAmelCase : int=2 , _UpperCAmelCase : int=4 , _UpperCAmelCase : int=37 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Union[str, Any]=None , ): """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = seq_length UpperCAmelCase__ = is_training UpperCAmelCase__ = use_input_mask UpperCAmelCase__ = use_labels UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = projection_dim UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = dropout UpperCAmelCase__ = attention_dropout UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = initializer_range UpperCAmelCase__ = scope UpperCAmelCase__ = bos_token_id def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = None if self.use_input_mask: UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase__ = input_mask.numpy() UpperCAmelCase__ , UpperCAmelCase__ = input_mask.shape UpperCAmelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCAmelCase ): UpperCAmelCase__ = 1 UpperCAmelCase__ = 0 UpperCAmelCase__ = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = TFBlipTextModel(config=_UpperCAmelCase ) UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , training=_UpperCAmelCase ) UpperCAmelCase__ = model(_UpperCAmelCase , training=_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs UpperCAmelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Any = (TFBlipTextModel,) if is_tf_available() else () lowerCAmelCase_ : Dict = False lowerCAmelCase_ : Any = False lowerCAmelCase_ : int = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = BlipTextModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" pass @slow def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = TFBlipTextModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCAmelCase )
code_codestyle: 346
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
346
1
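The BLEU metric row above delegates the actual scoring to the referenced nmt_bleu script. As a rough, self-contained sketch (not the reference implementation), the score combines clipped n-gram precisions with a brevity penalty; the helper names below are illustrative only.

import math
from collections import Counter

def ngrams(tokens, n):
    return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

def bleu_sketch(prediction, reference, max_order=2):
    # clipped n-gram precisions: each predicted n-gram is credited at most as
    # often as it occurs in the reference
    precisions = []
    for n in range(1, max_order + 1):
        pred_counts, ref_counts = ngrams(prediction, n), ngrams(reference, n)
        overlap = sum(min(c, ref_counts[g]) for g, c in pred_counts.items())
        precisions.append(overlap / max(sum(pred_counts.values()), 1))
    if min(precisions) == 0:
        return 0.0
    # brevity penalty punishes candidates shorter than the reference
    ratio = len(prediction) / len(reference)
    bp = 1.0 if ratio >= 1.0 else math.exp(1 - 1 / ratio)
    return bp * math.exp(sum(math.log(p) for p in precisions) / max_order)

# an exact match scores 1.0, mirroring the docstring example in the row above
print(bleu_sketch(["hello", "there", "general", "kenobi"],
                  ["hello", "there", "general", "kenobi"]))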
'''simple docstring''' import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'): UpperCAmelCase_ = { 'linear': PIL.Image.Resampling.BILINEAR, 'bilinear': PIL.Image.Resampling.BILINEAR, 'bicubic': PIL.Image.Resampling.BICUBIC, 'lanczos': PIL.Image.Resampling.LANCZOS, 'nearest': PIL.Image.Resampling.NEAREST, } else: UpperCAmelCase_ = { 'linear': PIL.Image.LINEAR, 'bilinear': PIL.Image.BILINEAR, 'bicubic': PIL.Image.BICUBIC, 'lanczos': PIL.Image.LANCZOS, 'nearest': PIL.Image.NEAREST, } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() UpperCAmelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE__ ) return images def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' if images.ndim == 3: UpperCAmelCase__ = images[None, ...] UpperCAmelCase__ = (images * 255).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images UpperCAmelCase__ = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images] else: UpperCAmelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE__ ) for image in images] return pil_images
346
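The two converters in the row above take diffusion model output in [-1, 1], denormalize it, and wrap every frame in a PIL image. Below is a minimal standalone sketch of the same steps with illustrative names; the row defines the equivalent logic for diffusers.

import torch
from PIL import Image

def tensor_to_pil(images: torch.Tensor):
    images = (images / 2 + 0.5).clamp(0, 1)                    # [-1, 1] -> [0, 1]
    arrays = images.cpu().permute(0, 2, 3, 1).float().numpy()  # NCHW -> NHWC
    arrays = (arrays * 255).round().astype("uint8")
    return [Image.fromarray(a) for a in arrays]

# two tiny 8x8 RGB images drawn at random
pils = tensor_to_pil(torch.rand(2, 3, 8, 8) * 2 - 1)
print(len(pils), pils[0].size)   # 2 (8, 8)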
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = KandinskyVaaControlnetPipeline lowerCAmelCase_ : List[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] lowerCAmelCase_ : Optional[int] = ["""image_embeds""", """negative_image_embeds""", """hint"""] lowerCAmelCase_ : Optional[int] = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] lowerCAmelCase_ : Tuple = False @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return 32 @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return 32 @property def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" return self.time_input_dim @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return 1_00 @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase__ = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase__ = UNetaDConditionModel(**_UpperCAmelCase ) return model @property def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase__ = VQModel(**self.dummy_movq_kwargs ) return model def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.dummy_unet UpperCAmelCase__ = self.dummy_movq UpperCAmelCase__ = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule="""linear""" , 
beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_UpperCAmelCase , ) UpperCAmelCase__ = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=0 ): """simple docstring""" UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _UpperCAmelCase ) # create hint UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) if str(_UpperCAmelCase ).startswith("""mps""" ): UpperCAmelCase__ = torch.manual_seed(_UpperCAmelCase ) else: UpperCAmelCase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) UpperCAmelCase__ = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = """cpu""" UpperCAmelCase__ = self.get_dummy_components() UpperCAmelCase__ = self.pipeline_class(**_UpperCAmelCase ) UpperCAmelCase__ = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase__ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) ) UpperCAmelCase__ = output.images UpperCAmelCase__ = pipe( **self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0] UpperCAmelCase__ = image[0, -3:, -3:, -1] UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) UpperCAmelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) UpperCAmelCase__ = torch.from_numpy(np.array(_UpperCAmelCase ) ).float() / 255.0 UpperCAmelCase__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) UpperCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_UpperCAmelCase ) UpperCAmelCase__ = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) UpperCAmelCase__ = pipeline.to(_UpperCAmelCase ) 
pipeline.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase__ = """A robot, 4k photo""" UpperCAmelCase__ = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ = pipe_prior( _UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() UpperCAmelCase__ = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ = pipeline( image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , hint=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=1_00 , output_type="""np""" , ) UpperCAmelCase__ = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
346
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = False UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) UpperCAmelCase__ = TaConfig( vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , ) UpperCAmelCase__ = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): UpperCAmelCase__ = TaBlock(_UpperCAmelCase ) self.encoders.append(_UpperCAmelCase ) UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase ) UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase ) UpperCAmelCase__ = encoder_input_tokens.shape[1] UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(_UpperCAmelCase ) UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase ) # inverted the attention mask UpperCAmelCase__ = encoder_input_tokens.size() UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase ) for lyr in self.encoders: UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0] UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase ) return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
346
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
346
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase__ = name.split(""".""" ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): '''simple docstring''' if config_path is not None: UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaConfig() if is_seq_class: UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = idalabel UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) 
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" ) UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
346
1
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=7 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : str=18 , _UpperCAmelCase : List[str]=30 , _UpperCAmelCase : Optional[int]=4_00 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : int=True , ): """simple docstring""" UpperCAmelCase__ = size if size is not None else {"""height""": 18, """width""": 18} UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = image_size UpperCAmelCase__ = min_resolution UpperCAmelCase__ = max_resolution UpperCAmelCase__ = do_resize UpperCAmelCase__ = size UpperCAmelCase__ = do_normalize def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804], [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Tuple = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = ImageGPTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , """clusters""" ) ) self.assertTrue(hasattr(_UpperCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(_UpperCAmelCase , """size""" ) ) self.assertTrue(hasattr(_UpperCAmelCase , """do_normalize""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) UpperCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(_UpperCAmelCase , obj[key] ) ) else: self.assertEqual(obj[key] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = 
os.path.join(_UpperCAmelCase , """image_processor.json""" ) image_processor_first.to_json_file(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processing_class.from_json_file(_UpperCAmelCase ).to_dict() UpperCAmelCase__ = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_UpperCAmelCase , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processing_class.from_pretrained(_UpperCAmelCase ).to_dict() UpperCAmelCase__ = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_UpperCAmelCase , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _UpperCAmelCase ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) UpperCAmelCase__ = Image.open(dataset[4]["""file"""] ) UpperCAmelCase__ = Image.open(dataset[5]["""file"""] ) UpperCAmelCase__ = [imagea, imagea] return images @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) UpperCAmelCase__ = prepare_images() # test non-batched UpperCAmelCase__ = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 10_24) ) UpperCAmelCase__ = [3_06, 1_91, 1_91] self.assertEqual(encoding.input_ids[0, :3].tolist() , _UpperCAmelCase ) # test batched UpperCAmelCase__ = image_processing(_UpperCAmelCase , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 10_24) ) UpperCAmelCase__ = [3_03, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , _UpperCAmelCase )
346
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: UpperCAmelCase__ = [] UpperCAmelCase__ = Counter() UpperCAmelCase__ = 0 UpperCAmelCase__ = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: UpperCAmelCase__ = candidate + """\n""" + test_case UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): UpperCAmelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCAmelCase__ , UpperCAmelCase__ = [], [] for result in results.values(): result.sort() UpperCAmelCase__ = [r[1]["""passed"""] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = k UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
346
1
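The last function in the code_eval metric above is the unbiased pass@k estimator from "Evaluating Large Language Models Trained on Code": pass@k = 1 - C(n - c, k) / C(n, k) for n sampled candidates of which c pass. A quick standalone numeric check:

from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    # if fewer than k candidates fail, at least one of any k must pass
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

# with 2 candidates and 1 passing: pass@1 = 0.5, pass@2 = 1.0,
# matching the docstring example in the metric above
print(pass_at_k(2, 1, 1), pass_at_k(2, 1, 2))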
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : str = """timm_backbone""" def __init__( self : List[str] , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Dict , ): """simple docstring""" super().__init__(**_UpperCAmelCase ) UpperCAmelCase__ = backbone UpperCAmelCase__ = num_channels UpperCAmelCase__ = features_only UpperCAmelCase__ = use_pretrained_backbone UpperCAmelCase__ = True UpperCAmelCase__ = out_indices if out_indices is not None else (-1,)
346
'''simple docstring''' import math def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False UpperCAmelCase__ = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=1 , **SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = factor * value UpperCAmelCase__ = value while not is_prime(SCREAMING_SNAKE_CASE__ ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **SCREAMING_SNAKE_CASE__ ) return value
346
1
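The two functions in the row above implement trial-division primality testing and an upward scan for the next prime (the row also supports a scaling factor and a descending mode via kwargs). A simplified standalone sketch of the same idea, with readable, illustrative names:

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:                  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0:   # negatives, 0, 1 and even numbers are not
        return False
    # trial division by odd numbers up to sqrt(number)
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))

def next_prime(value: int) -> int:
    while not is_prime(value):
        value += 1
    return value

print([n for n in range(20) if is_prime(n)])   # [2, 3, 5, 7, 11, 13, 17, 19]
print(next_prime(14))                          # 17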
'''simple docstring''' from string import ascii_lowercase, ascii_uppercase def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if not sentence: return "" UpperCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
346
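A tiny usage check of the capitalisation helper above: only the first character is mapped through the lowercase-to-uppercase table, and the rest of the sentence is left untouched.

from string import ascii_lowercase, ascii_uppercase

lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
sentence = "hello World"
print(lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:])   # Hello World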
'''simple docstring''' import string from math import logaa def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = document.translate( str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" ) UpperCAmelCase__ = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = corpus.lower().translate( str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with '' UpperCAmelCase__ = corpus_without_punctuation.split("""\n""" ) UpperCAmelCase__ = term.lower() return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE__ )) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False ): '''simple docstring''' if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) , 3 ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' return round(tf * idf , 3 )
346
1
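The helpers in the row above compute term frequency, document frequency, idf = log10(N / df), and their product. A worked numeric example of that arithmetic, with values chosen purely for illustration:

from math import log10

tf = 3                               # the term occurs 3 times in the document
n_docs, df = 10, 2                   # corpus of 10 documents, 2 contain the term
idf = round(log10(n_docs / df), 3)   # log10(5) = 0.699
tf_idf = round(tf * idf, 3)          # 3 * 0.699 = 2.097
print(idf, tf_idf)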
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = downstream_dict["""projector.weight"""] UpperCAmelCase__ = downstream_dict["""projector.bias"""] UpperCAmelCase__ = downstream_dict["""model.post_net.linear.weight"""] UpperCAmelCase__ = downstream_dict["""model.post_net.linear.bias"""] return model def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = downstream_dict["""model.linear.weight"""] UpperCAmelCase__ = downstream_dict["""model.linear.bias"""] return model def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = downstream_dict["""connector.weight"""] UpperCAmelCase__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): UpperCAmelCase__ = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] UpperCAmelCase__ = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] UpperCAmelCase__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] UpperCAmelCase__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] UpperCAmelCase__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] UpperCAmelCase__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] UpperCAmelCase__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" ) UpperCAmelCase__ = checkpoint["""Downstream"""] UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained( SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): UpperCAmelCase__ = convert_classification(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif arch.endswith("""ForAudioFrameClassification""" ): UpperCAmelCase__ = convert_diarization(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif arch.endswith("""ForXVector""" ): UpperCAmelCase__ = convert_xvector(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if 
hf_config.use_weighted_layer_sum: UpperCAmelCase__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') UpperCAmelCase_ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
346
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase_ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase_ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase_ = model.state_dict() UpperCAmelCase_ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase_ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"] UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
346
1
'''simple docstring''' import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels UpperCAmelCase_ = object() # For specifying empty leaf dict `{}` UpperCAmelCase_ = object() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ = tuple((re.compile(x + """$""" ) for x in qs) ) for i in range(len(SCREAMING_SNAKE_CASE__ ) - len(SCREAMING_SNAKE_CASE__ ) + 1 ): UpperCAmelCase__ = [x.match(SCREAMING_SNAKE_CASE__ ) for x, y in zip(SCREAMING_SNAKE_CASE__ , ks[i:] )] if matches and all(SCREAMING_SNAKE_CASE__ ): return True return False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' def replace(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): for rule, replacement in rules: if _match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return replacement return val return replace def _UpperCamelCase ( ): '''simple docstring''' return [ # embeddings (("transformer", "wpe", "embedding"), P("""mp""" , SCREAMING_SNAKE_CASE__ )), (("transformer", "wte", "embedding"), P("""mp""" , SCREAMING_SNAKE_CASE__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE__ , """mp""" )), (("attention", "out_proj", "kernel"), P("""mp""" , SCREAMING_SNAKE_CASE__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE__ , """mp""" )), (("mlp", "c_fc", "bias"), P("""mp""" )), (("mlp", "c_proj", "kernel"), P("""mp""" , SCREAMING_SNAKE_CASE__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = _get_partition_rules() UpperCAmelCase__ = _replacement_rules(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE__ )} UpperCAmelCase__ = {k: replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(SCREAMING_SNAKE_CASE__ ) )
346
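The partition rules in the row above pair tuples of regexes with JAX PartitionSpecs; a rule fires when its patterns match a contiguous window of the flattened parameter path. A small standalone sketch of that matching step (an illustrative re-implementation, not the exact helper above):

import re

def rule_matches(rule, path):
    patterns = [re.compile(p + "$") for p in rule]
    for i in range(len(path) - len(patterns) + 1):
        if all(pat.match(part) for pat, part in zip(patterns, path[i:])):
            return True
    return False

rule = ("attention", "(q_proj|k_proj|v_proj)", "kernel")
path = ("transformer", "h", "3", "attention", "q_proj", "kernel")
print(rule_matches(rule, path))   # True: this kernel is partitioned over the "mp" axis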
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
**_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( 
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
346
1
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Union[str, Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Any=5 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=37 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Any=5_12 , _UpperCAmelCase : Dict=16 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]="None" , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=None , ): """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = seq_length UpperCAmelCase__ = is_training UpperCAmelCase__ = use_input_mask UpperCAmelCase__ = use_token_type_ids UpperCAmelCase__ = use_labels UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = type_vocab_size UpperCAmelCase__ = type_sequence_label_size UpperCAmelCase__ = initializer_range UpperCAmelCase__ = num_labels UpperCAmelCase__ = num_choices UpperCAmelCase__ = relative_attention UpperCAmelCase__ = position_biased_input UpperCAmelCase__ = pos_att_type UpperCAmelCase__ = scope def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = None if self.use_input_mask: UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase__ = None if self.use_token_type_ids: UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" return 
DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Tuple ): """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = DebertaVaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0] UpperCAmelCase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0] UpperCAmelCase__ = model(_UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any ): """simple docstring""" UpperCAmelCase__ = DebertaVaForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = DebertaVaForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ): """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = DebertaVaForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : 
Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = DebertaVaForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = DebertaVaForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = config_and_inputs UpperCAmelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : List[str] = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowerCAmelCase_ : Optional[int] = ( { """feature-extraction""": DebertaVaModel, """fill-mask""": DebertaVaForMaskedLM, """question-answering""": DebertaVaForQuestionAnswering, """text-classification""": DebertaVaForSequenceClassification, """token-classification""": DebertaVaForTokenClassification, """zero-shot""": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ : str = True lowerCAmelCase_ : Dict = False lowerCAmelCase_ : Tuple = False lowerCAmelCase_ : Union[str, Any] = False lowerCAmelCase_ : Optional[int] = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = DebertaVaModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*_UpperCAmelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = DebertaVaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="""Model not available yet""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" pass @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) UpperCAmelCase__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) UpperCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] # compare the actual values for a slice. UpperCAmelCase__ = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
346
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[int] = """vivit""" def __init__( self : List[str] , _UpperCAmelCase : List[Any]=2_24 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Any=[2, 16, 16] , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Optional[Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu_fast" , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : List[Any]=1E-06 , _UpperCAmelCase : List[str]=True , **_UpperCAmelCase : List[Any] , ): """simple docstring""" UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = initializer_range UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = image_size UpperCAmelCase__ = num_frames UpperCAmelCase__ = tubelet_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = qkv_bias super().__init__(**_UpperCAmelCase )
346
1
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Dict = ["""image_processor"""] lowerCAmelCase_ : int = """SamImageProcessor""" def __init__( self : Optional[Any] , _UpperCAmelCase : List[Any] ): """simple docstring""" super().__init__(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor UpperCAmelCase__ = -10 UpperCAmelCase__ = self.image_processor.size["""longest_edge"""] def __call__( self : Dict , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Union[str, Any] , ): """simple docstring""" UpperCAmelCase__ = self.image_processor( _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , ) # pop arguments that are not used in the foward but used nevertheless UpperCAmelCase__ = encoding_image_processor["""original_sizes"""] if hasattr(_UpperCAmelCase , """numpy""" ): # Checks if Torch or TF tensor UpperCAmelCase__ = original_sizes.numpy() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._check_and_preprocess_points( input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , ) UpperCAmelCase__ = self._normalize_and_convert( _UpperCAmelCase , _UpperCAmelCase , input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Any="pt" , ): """simple docstring""" if input_points is not None: if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): UpperCAmelCase__ = [ self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] ) for point in input_points ] else: UpperCAmelCase__ = [ self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase ) for point, original_size in zip(_UpperCAmelCase , _UpperCAmelCase ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: UpperCAmelCase__ , UpperCAmelCase__ = self._pad_points_and_labels(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) if input_labels is not None: UpperCAmelCase__ = np.array(_UpperCAmelCase ) if input_boxes is not None: if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): UpperCAmelCase__ = [ self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] , is_bounding_box=_UpperCAmelCase ) for box in input_boxes ] else: UpperCAmelCase__ = [ self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase , is_bounding_box=_UpperCAmelCase ) for box, original_size in zip(_UpperCAmelCase , _UpperCAmelCase ) ] UpperCAmelCase__ = np.array(_UpperCAmelCase ) if input_boxes is not None: if return_tensors == "pt": UpperCAmelCase__ = torch.from_numpy(_UpperCAmelCase ) # boxes batch size of 1 by default 
UpperCAmelCase__ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": UpperCAmelCase__ = tf.convert_to_tensor(_UpperCAmelCase ) # boxes batch size of 1 by default UpperCAmelCase__ = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"""input_boxes""": input_boxes} ) if input_points is not None: if return_tensors == "pt": UpperCAmelCase__ = torch.from_numpy(_UpperCAmelCase ) # point batch size of 1 by default UpperCAmelCase__ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": UpperCAmelCase__ = tf.convert_to_tensor(_UpperCAmelCase ) # point batch size of 1 by default UpperCAmelCase__ = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"""input_points""": input_points} ) if input_labels is not None: if return_tensors == "pt": UpperCAmelCase__ = torch.from_numpy(_UpperCAmelCase ) # point batch size of 1 by default UpperCAmelCase__ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": UpperCAmelCase__ = tf.convert_to_tensor(_UpperCAmelCase ) # point batch size of 1 by default UpperCAmelCase__ = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"""input_labels""": input_labels} ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = max([point.shape[0] for point in input_points] ) UpperCAmelCase__ = [] for i, point in enumerate(_UpperCAmelCase ): if point.shape[0] != expected_nb_points: UpperCAmelCase__ = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) UpperCAmelCase__ = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(_UpperCAmelCase ) UpperCAmelCase__ = processed_input_points return input_points, input_labels def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = original_size UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor._get_preprocess_shape(_UpperCAmelCase , longest_edge=_UpperCAmelCase ) UpperCAmelCase__ = deepcopy(_UpperCAmelCase ).astype(_UpperCAmelCase ) if is_bounding_box: UpperCAmelCase__ = coords.reshape(-1 , 2 , 2 ) UpperCAmelCase__ = coords[..., 0] * (new_w / old_w) UpperCAmelCase__ = coords[..., 1] * (new_h / old_h) if is_bounding_box: UpperCAmelCase__ = coords.reshape(-1 , 4 ) return coords def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , ): """simple docstring""" if input_points is not None: if hasattr(_UpperCAmelCase , """numpy""" ): # Checks for TF or Torch tensor UpperCAmelCase__ = input_points.numpy().tolist() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_points[0] , _UpperCAmelCase ): raise ValueError("""Input points must be a list of list of floating points.""" ) UpperCAmelCase__ = [np.array(_UpperCAmelCase ) for input_point in input_points] else: UpperCAmelCase__ = None if input_labels is not None: if hasattr(_UpperCAmelCase , """numpy""" ): UpperCAmelCase__ = 
input_labels.numpy().tolist() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_labels[0] , _UpperCAmelCase ): raise ValueError("""Input labels must be a list of list integers.""" ) UpperCAmelCase__ = [np.array(_UpperCAmelCase ) for label in input_labels] else: UpperCAmelCase__ = None if input_boxes is not None: if hasattr(_UpperCAmelCase , """numpy""" ): UpperCAmelCase__ = input_boxes.numpy().tolist() if ( not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_boxes[0] , _UpperCAmelCase ) or not isinstance(input_boxes[0][0] , _UpperCAmelCase ) ): raise ValueError("""Input boxes must be a list of list of list of floating points.""" ) UpperCAmelCase__ = [np.array(_UpperCAmelCase ).astype(np.floataa ) for box in input_boxes] else: UpperCAmelCase__ = None return input_points, input_labels, input_boxes @property def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.image_processor.model_input_names return list(dict.fromkeys(_UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : int , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ): """simple docstring""" return self.image_processor.post_process_masks(*_UpperCAmelCase , **_UpperCAmelCase )
346
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
346
1
'''simple docstring''' from copy import deepcopy class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : list[int] | None = None , _UpperCAmelCase : int | None = None ): """simple docstring""" if arr is None and size is not None: UpperCAmelCase__ = size UpperCAmelCase__ = [0] * size elif arr is not None: self.init(_UpperCAmelCase ) else: raise ValueError("""Either arr or size must be specified""" ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : list[int] ): """simple docstring""" UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = deepcopy(_UpperCAmelCase ) for i in range(1 , self.size ): UpperCAmelCase__ = self.next_(_UpperCAmelCase ) if j < self.size: self.tree[j] += self.tree[i] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): UpperCAmelCase__ = self.next_(_UpperCAmelCase ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase : int ): """simple docstring""" return index + (index & (-index)) @staticmethod def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase : int ): """simple docstring""" return index - (index & (-index)) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ): """simple docstring""" if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value UpperCAmelCase__ = self.next_(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ): """simple docstring""" self.add(_UpperCAmelCase , value - self.get(_UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : int ): """simple docstring""" if right == 0: return 0 UpperCAmelCase__ = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] UpperCAmelCase__ = self.prev(_UpperCAmelCase ) return result def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int ): """simple docstring""" return self.prefix(_UpperCAmelCase ) - self.prefix(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : int ): """simple docstring""" return self.query(_UpperCAmelCase , index + 1 ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" value -= self.tree[0] if value < 0: return -1 UpperCAmelCase__ = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 UpperCAmelCase__ = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
346
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ): """simple docstring""" UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase__ = 3 UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) UpperCAmelCase__ = jieba UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" if self.remove_space: UpperCAmelCase__ = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ = inputs UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase ) UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: UpperCAmelCase__ = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase ) UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) UpperCAmelCase__ = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ = cur_pieces[1:] else: UpperCAmelCase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ): """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , 
already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
346
1
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCAmelCase_ = get_tests_dir('fixtures') UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json') UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json') class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCAmelCase__ = WavaVecaFeatureExtractor(**_UpperCAmelCase ) # save in new folder model_config.save_pretrained(_UpperCAmelCase ) config.save_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase ) # make sure private variable is not incorrectly saved UpperCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase , revision="""aaaaaa""" ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If 
remote code is disabled, we can't load this config. with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_UpperCAmelCase ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , _UpperCAmelCase ) AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCAmelCase ): AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = True try: AutoConfig.register("""custom""" , _UpperCAmelCase ) AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase ) # If remote code is not set, the default is to use local UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(_UpperCAmelCase , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
346
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ = logging.getLogger(__name__) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) UpperCAmelCase__ = parser.parse_args() return args def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples["""text"""] ) return fn def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ = [] for i in range(len(tokenized_data["""input_ids"""] ) ): UpperCAmelCase__ = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = example.SerializeToString() records.append(SCREAMING_SNAKE_CASE__ ) return records def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit ) UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(SCREAMING_SNAKE_CASE__ : int ): # Concatenate all texts. UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()} UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase__ = { k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ): UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ ) with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file: for i in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase__ = serialized_examples[i] out_file.write(SCREAMING_SNAKE_CASE__ ) print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = parse_args() main(args)
346
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
346
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCAmelCase_ = '\\n\n' UpperCAmelCase_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' UpperCAmelCase_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase__ = """cuda""" else: UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.to(_UpperCAmelCase ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase__ = model.config.max_length - 1 else: UpperCAmelCase__ = model.config.max_length UpperCAmelCase__ = tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase ) UpperCAmelCase__ = encodings["""input_ids"""] UpperCAmelCase__ = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase__ = [] UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ): UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) ) UpperCAmelCase__ = encoded_texts[start_index:end_index] UpperCAmelCase__ = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase ) UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_UpperCAmelCase ), attn_mask] , dim=1 ) UpperCAmelCase__ = encoded_batch with torch.no_grad(): UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits UpperCAmelCase__ = out_logits[..., :-1, :].contiguous() UpperCAmelCase__ = labels[..., 1:].contiguous() UpperCAmelCase__ = attn_mask[..., 1:].contiguous() UpperCAmelCase__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
def is_palindrome(head) -> bool:
    if not head:
        return True

    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!

    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt

    # compare the two parts
    # the second part has the same number of nodes or one fewer
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
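# Hypothetical usage sketch (not part of the original file): the palindrome checks above
# assume a singly linked list node type exposing `.val` and `.next`; the minimal `Node`
# class and the sample lists below are illustrative assumptions.
class Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


palindrome = Node(1, Node(2, Node(2, Node(1))))    # 1 -> 2 -> 2 -> 1
not_palindrome = Node(1, Node(2, Node(3)))         # 1 -> 2 -> 3
print(is_palindrome_stack(palindrome))      # True
print(is_palindrome_stack(not_palindrome))  # False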
def solution(limit: int = 1000000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
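# Illustrative cross-check (an assumption, not part of the original file): for a small limit,
# the phi-sieve answer can be compared against a brute-force count of reduced proper fractions.
from math import gcd

small_limit = 8
brute_force = sum(1 for d in range(2, small_limit + 1) for n in range(1, d) if gcd(n, d) == 1)
assert solution(small_limit) == brute_force  # both equal 21 for limit = 8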
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
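# Minimal usage sketch (assumption, not part of the original file): a small synthetic
# grayscale image is used here so the point() mapping can be shown without an image on disk.
from PIL import Image

sample = Image.new("L", (4, 4), color=140)
high_contrast = change_contrast(sample, 170)
print(high_contrast.getpixel((0, 0)))  # brighter than 140: values above the 128 midpoint are pushed up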
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ): """simple docstring""" UpperCAmelCase__ = {} if top_k is not None: UpperCAmelCase__ = top_k return {}, {}, postprocess_params def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ): """simple docstring""" return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = load_image(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) elif self.framework == "tf": UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
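# Hedged usage sketch (not part of the original module): the class above backs the
# "image-classification" pipeline task in transformers; the checkpoint name and image path
# below are placeholder assumptions for illustration.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
predictions = classifier("path/to/some_image.jpg", top_k=3)
print(predictions)  # e.g. [{"score": ..., "label": ...}, ...]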
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ = logging.get_logger(__name__) class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = """maskformer-swin""" lowerCAmelCase_ : Optional[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Dict , _UpperCAmelCase : Optional[Any]=2_24 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : str=3 , _UpperCAmelCase : str=96 , _UpperCAmelCase : List[str]=[2, 2, 6, 2] , _UpperCAmelCase : str=[3, 6, 12, 24] , _UpperCAmelCase : str=7 , _UpperCAmelCase : int=4.0 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Dict=1E-5 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Dict , ): """simple docstring""" super().__init__(**_UpperCAmelCase ) UpperCAmelCase__ = image_size UpperCAmelCase__ = patch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = embed_dim UpperCAmelCase__ = depths UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = num_heads UpperCAmelCase__ = window_size UpperCAmelCase__ = mlp_ratio UpperCAmelCase__ = qkv_bias UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = drop_path_rate UpperCAmelCase__ = hidden_act UpperCAmelCase__ = use_absolute_embeddings UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase__ = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) ) UpperCAmelCase__ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(_UpperCAmelCase ) + 1 )] UpperCAmelCase__ , UpperCAmelCase__ = get_aligned_output_features_output_indices( out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
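# Illustrative check (assumption, not part of the original file): solution(n) evaluates the
# central binomial coefficient C(2n, n), so solution(2) should be C(4, 2) = 6.
assert solution(2) == 6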
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
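# Illustrative usage (assumption, not part of the original file): primes up to 30.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]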
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
'''simple docstring''' import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup UpperCAmelCase_ = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str = "dhaka" , SCREAMING_SNAKE_CASE__ : int = 5 ): '''simple docstring''' UpperCAmelCase__ = min(SCREAMING_SNAKE_CASE__ , 50 ) # Prevent abuse! UpperCAmelCase__ = { """q""": query, """tbm""": """isch""", """hl""": """en""", """ijn""": """0""", } UpperCAmelCase__ = requests.get("""https://www.google.com/search""" , params=SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = BeautifulSoup(html.text , """html.parser""" ) UpperCAmelCase__ = """""".join( re.findall(r"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) ) UpperCAmelCase__ = json.dumps(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = re.findall( r"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , SCREAMING_SNAKE_CASE__ , ) if not matched_google_image_data: return 0 UpperCAmelCase__ = re.sub( r"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(SCREAMING_SNAKE_CASE__ ) , ) UpperCAmelCase__ = re.findall( r"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , SCREAMING_SNAKE_CASE__ , ) for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE__ ): if index >= max_images: return index UpperCAmelCase__ = bytes(SCREAMING_SNAKE_CASE__ , """ascii""" ).decode( """unicode-escape""" ) UpperCAmelCase__ = bytes(SCREAMING_SNAKE_CASE__ , """ascii""" ).decode( """unicode-escape""" ) UpperCAmelCase__ = urllib.request.build_opener() UpperCAmelCase__ = [ ( """User-Agent""", """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36""" """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""", ) ] urllib.request.install_opener(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = F'''query_{query.replace(' ' , '_' )}''' if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) urllib.request.urlretrieve( # noqa: S310 SCREAMING_SNAKE_CASE__ , F'''{path_name}/original_size_img_{index}.jpg''' ) return index if __name__ == "__main__": try: UpperCAmelCase_ = download_images_from_google_query(sys.argv[1]) print(f"{image_count} images were downloaded to disk.") except IndexError: print('Please provide a search term.') raise
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
'''simple docstring''' import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = '▁' UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : List[str] = BertGenerationTokenizer lowerCAmelCase_ : str = False lowerCAmelCase_ : Any = True def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" super().setUp() UpperCAmelCase__ = BertGenerationTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = """<s>""" UpperCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(_UpperCAmelCase ) , 10_02 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = BertGenerationTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_85, 46, 10, 1_70, 3_82] , ) UpperCAmelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = """Hello World!""" UpperCAmelCase__ 
= [1_85_36, 22_60, 1_01] self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) UpperCAmelCase__ = [ 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, ] self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence UpperCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase__ = """ """.join(_UpperCAmelCase ) UpperCAmelCase__ = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors="""pt""" , return_token_type_ids=_UpperCAmelCase ) UpperCAmelCase__ = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_UpperCAmelCase ) UpperCAmelCase__ = BertGenerationConfig() UpperCAmelCase__ = BertGenerationEncoder(_UpperCAmelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_UpperCAmelCase ) model(**_UpperCAmelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase_ = 1_6 UpperCAmelCase_ = 3_2 def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int = 16 ): '''simple docstring''' UpperCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) UpperCAmelCase__ = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Any ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase__ = datasets.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(SCREAMING_SNAKE_CASE__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase__ = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase__ = 8 else: UpperCAmelCase__ = None return tokenizer.pad( SCREAMING_SNAKE_CASE__ , padding="""longest""" , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
UpperCAmelCase__ = DataLoader( tokenized_datasets["""train"""] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = DataLoader( tokenized_datasets["""validation"""] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase_ = mocked_dataloaders # noqa: F811 def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , SCREAMING_SNAKE_CASE__ ) == "1": UpperCAmelCase__ = 2 # Initialize accelerator UpperCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ = config["""lr"""] UpperCAmelCase__ = int(config["""num_epochs"""] ) UpperCAmelCase__ = int(config["""seed"""] ) UpperCAmelCase__ = int(config["""batch_size"""] ) UpperCAmelCase__ = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation UpperCAmelCase__ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase__ = MAX_GPU_BATCH_SIZE set_seed(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=SCREAMING_SNAKE_CASE__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase__ = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase__ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ ) # Instantiate scheduler UpperCAmelCase__ = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = accelerator.prepare( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) UpperCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = outputs.loss UpperCAmelCase__ = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() UpperCAmelCase__ = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = outputs.logits.argmax(dim=-1 ) UpperCAmelCase__ , UpperCAmelCase__ = accelerator.gather((predictions, batch["""labels"""]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(SCREAMING_SNAKE_CASE__ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples UpperCAmelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
346
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase__ = name.split(""".""" ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): '''simple docstring''' if config_path is not None: UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaConfig() if is_seq_class: UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = idalabel UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) 
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" ) UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
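For reference, the `__main__` block above drives `convert_wavaveca_checkpoint` positionally from the parsed command-line arguments; a hypothetical direct call looks like this (all paths below are placeholders, not real files):

# Hypothetical invocation with placeholder paths, mirroring the positional
# argument order used in the __main__ block above.
convert_wavaveca_checkpoint(
    "./wav2vec2_small_960h.pt",   # fairseq checkpoint to convert
    "./wav2vec2-base-960h-hf",    # output folder for the converted model
    None,                         # optional path to an hf config.json
    "./dict.ltr.txt",             # fairseq dictionary (needed for fine-tuned CTC models)
    True,                         # is_finetuned
    False,                        # is_seq_class
)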
346
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    # Two-pointer check: walk inwards from both ends.
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    # Peel matching characters off both ends recursively.
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    # Compare the string with its reverse slice.
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
346
1
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
346
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
346
1
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"

# The parameter names below (q, appid, lat, lon) are forwarded verbatim to the
# OpenWeatherMap API via params=locals(), so they must match its query parameters.


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
346
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    # Decoded video frames returned by the text-to-video pipelines.
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""] lowerCAmelCase_ : Optional[Any] = """LayoutLMv2ImageProcessor""" lowerCAmelCase_ : List[Any] = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self : int , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Optional[int] ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _UpperCAmelCase , ) UpperCAmelCase__ = kwargs.pop("""feature_extractor""" ) UpperCAmelCase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__( self : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[str] , ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes """ """if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" ) # first, apply the image processor UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension) UpperCAmelCase__ = features["""words"""] UpperCAmelCase__ = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , 
word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , ) # add pixel values UpperCAmelCase__ = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: UpperCAmelCase__ = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs["""overflow_to_sample_mapping"""] ) UpperCAmelCase__ = images return encoded_inputs def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f''' {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}''' ) return images_with_overflow def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : int ): """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ): """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _UpperCAmelCase , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _UpperCAmelCase , ) return self.image_processor
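A short usage sketch for this processor may help; the checkpoint name and image path below are placeholders, and the default image processor runs OCR (so pytesseract must be installed):

from PIL import Image
from transformers import LayoutXLMProcessor

# Placeholder checkpoint and file name, shown only to illustrate the call pattern.
processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")

# With apply_ocr left at its default (True), words and boxes come from Tesseract,
# so only the image is passed; the output carries the model_input_names listed above.
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # e.g. input_ids, attention_mask, bbox, image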
346
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = False UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) UpperCAmelCase__ = TaConfig( vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , ) UpperCAmelCase__ = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): UpperCAmelCase__ = TaBlock(_UpperCAmelCase ) self.encoders.append(_UpperCAmelCase ) UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase ) UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase ) UpperCAmelCase__ = encoder_input_tokens.shape[1] UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(_UpperCAmelCase ) UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase ) # inverted the attention mask UpperCAmelCase__ = encoder_input_tokens.size() UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase ) for lyr in self.encoders: UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0] UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase ) return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
346
1
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar('T') UpperCAmelCase_ = TypeVar('U') class lowerCAmelCase_ ( Generic[T, U] ): '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : T | None , _UpperCAmelCase : U | None ): """simple docstring""" UpperCAmelCase__ = key UpperCAmelCase__ = val UpperCAmelCase__ = None UpperCAmelCase__ = None def __repr__( self : List[str] ): """simple docstring""" return ( f'''Node: key: {self.key}, val: {self.val}, ''' f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class lowerCAmelCase_ ( Generic[T, U] ): '''simple docstring''' def __init__( self : str ): """simple docstring""" UpperCAmelCase__ = DoubleLinkedListNode(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = DoubleLinkedListNode(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = self.rear, self.head def __repr__( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = ["""DoubleLinkedList"""] UpperCAmelCase__ = self.head while node.next is not None: rep.append(str(_UpperCAmelCase ) ) UpperCAmelCase__ = node.next rep.append(str(self.rear ) ) return ",\n ".join(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : DoubleLinkedListNode[T, U] ): """simple docstring""" UpperCAmelCase__ = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None UpperCAmelCase__ = node UpperCAmelCase__ = previous UpperCAmelCase__ = node UpperCAmelCase__ = self.rear def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : DoubleLinkedListNode[T, U] ): """simple docstring""" if node.prev is None or node.next is None: return None UpperCAmelCase__ = node.next UpperCAmelCase__ = node.prev UpperCAmelCase__ = None UpperCAmelCase__ = None return node class lowerCAmelCase_ ( Generic[T, U] ): '''simple docstring''' lowerCAmelCase_ : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = DoubleLinkedList() UpperCAmelCase__ = capacity UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 UpperCAmelCase__ = {} def __repr__( self : List[str] ): """simple docstring""" return ( f'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' f'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self : List[str] , _UpperCAmelCase : T ): """simple docstring""" return key in self.cache def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : T ): """simple docstring""" if key in self.cache: self.hits += 1 UpperCAmelCase__ = self.cache[key] UpperCAmelCase__ = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(_UpperCAmelCase ) return node.val self.miss += 1 return None def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : T , _UpperCAmelCase : U ): """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity UpperCAmelCase__ = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(_UpperCAmelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys 
-= 1 UpperCAmelCase__ = DoubleLinkedListNode(_UpperCAmelCase , _UpperCAmelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value UpperCAmelCase__ = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list UpperCAmelCase__ = value self.list.add(_UpperCAmelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , _UpperCAmelCase : int = 1_28 ): """simple docstring""" def cache_decorator_inner(_UpperCAmelCase : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*_UpperCAmelCase : T ) -> U: if func not in cls.decorator_function_to_instance_map: UpperCAmelCase__ = LRUCache(_UpperCAmelCase ) UpperCAmelCase__ = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: UpperCAmelCase__ = func(*_UpperCAmelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , _UpperCAmelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(_UpperCAmelCase , """cache_info""" , _UpperCAmelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
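The classmethod at the end of this cache wraps a single-argument function in a memoizer; a minimal usage sketch, assuming the class keeps the `LRUCache` name used inside the wrapper and that the classmethod is exposed as `decorator` (both names are obscured in the listing above):

# Assumes the cache class is importable as LRUCache and its classmethod is named
# `decorator`; capacity 100 is passed instead of the default 128.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    # Recursive calls go back through the wrapper, so intermediate results are cached.
    return fib(num - 1) + fib(num - 2)


print(fib(50))           # fast, thanks to memoization
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)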
346
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: UpperCAmelCase_ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Tuple=18 , _UpperCAmelCase : str=30 , _UpperCAmelCase : Optional[Any]=4_00 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=None , ): """simple docstring""" UpperCAmelCase__ = size if size is not None else {"""height""": 20, """width""": 20} UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = image_size UpperCAmelCase__ = min_resolution UpperCAmelCase__ = max_resolution UpperCAmelCase__ = size UpperCAmelCase__ = do_normalize UpperCAmelCase__ = do_convert_rgb UpperCAmelCase__ = [5_12, 10_24, 20_48, 40_96] UpperCAmelCase__ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg""" UpperCAmelCase__ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("""RGB""" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Any = PixaStructImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = PixaStructImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_UpperCAmelCase , """do_convert_rgb""" ) ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ = 20_48 UpperCAmelCase__ = image_processor(_UpperCAmelCase , return_tensors="""pt""" , max_patches=_UpperCAmelCase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input UpperCAmelCase__ = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ = image_processor( _UpperCAmelCase , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input UpperCAmelCase__ = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches UpperCAmelCase__ = """Hello""" UpperCAmelCase__ = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ = image_processor( _UpperCAmelCase , return_tensors="""pt""" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) UpperCAmelCase__ = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ = image_processor( _UpperCAmelCase , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input UpperCAmelCase__ = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ = image_processor( _UpperCAmelCase , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ = 3 @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_UpperCAmelCase , """do_convert_rgb""" ) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input UpperCAmelCase__ = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ = image_processor( _UpperCAmelCase , return_tensors="""pt""" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
346
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: UpperCAmelCase__ = [] UpperCAmelCase__ = Counter() UpperCAmelCase__ = 0 UpperCAmelCase__ = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: UpperCAmelCase__ = candidate + """\n""" + test_case UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): UpperCAmelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCAmelCase__ , UpperCAmelCase__ = [], [] for result in results.values(): result.sort() UpperCAmelCase__ = [r[1]["""passed"""] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = k UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
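A short aside on the `estimator` helper in the metric code above (my own illustration, not part of the source cell): for a problem with n generated samples of which c pass the tests, the unbiased pass@k estimate is 1 - C(n-c, k) / C(n, k), and the running product the code computes is algebraically the same quantity. A minimal self-contained check:

from math import comb

import numpy as np


def pass_at_k(n: int, c: int, k: int) -> float:
    # Probability that at least one of k samples, drawn without replacement
    # from n candidates of which c are correct, passes the tests.
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))


# Sanity check against the combinatorial form 1 - C(n-c, k) / C(n, k).
n, c, k = 10, 3, 2
assert abs(pass_at_k(n, c, k) - (1 - comb(n - c, k) / comb(n, k))) < 1e-12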
346
1
'''simple docstring''' import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=1_00 , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : int=30 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : List[Any]=37 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : str=10 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Dict=3 , ): """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = vocab_size UpperCAmelCase__ = batch_size UpperCAmelCase__ = image_size UpperCAmelCase__ = patch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = is_training UpperCAmelCase__ = use_labels UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = type_sequence_label_size UpperCAmelCase__ = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ = (image_size // patch_size) ** 2 UpperCAmelCase__ = num_patches + 1 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] ): """simple docstring""" UpperCAmelCase__ = FlaxBeitModel(config=_UpperCAmelCase ) UpperCAmelCase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = 
FlaxBeitForMaskedImageModeling(config=_UpperCAmelCase ) UpperCAmelCase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = self.type_sequence_label_size UpperCAmelCase__ = FlaxBeitForImageClassification(config=_UpperCAmelCase ) UpperCAmelCase__ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ = 1 UpperCAmelCase__ = FlaxBeitForImageClassification(_UpperCAmelCase ) UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ = model(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = config_and_inputs UpperCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = FlaxBeitModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(_UpperCAmelCase ) UpperCAmelCase__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ = [*signature.parameters.keys()] UpperCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = model_class(_UpperCAmelCase ) @jax.jit def model_jitted(_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ): return model(pixel_values=_UpperCAmelCase , **_UpperCAmelCase ) with self.subTest("""JIT Enabled""" ): UpperCAmelCase__ = model_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCAmelCase__ = model_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCAmelCase__ = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" ) UpperCAmelCase__ = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @require_flax class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ) UpperCAmelCase__ = self.default_image_processor UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = image_processor(images=_UpperCAmelCase , return_tensors="""np""" ).pixel_values # prepare bool_masked_pos UpperCAmelCase__ = np.ones((1, 1_96) , dtype=_UpperCAmelCase ) # forward pass UpperCAmelCase__ = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase ) UpperCAmelCase__ = outputs.logits # verify the logits UpperCAmelCase__ = (1, 1_96, 81_92) self.assertEqual(logits.shape , _UpperCAmelCase ) UpperCAmelCase__ = np.array( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1E-2 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ) UpperCAmelCase__ = self.default_image_processor UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = image_processor(images=_UpperCAmelCase , return_tensors="""np""" ) # forward pass UpperCAmelCase__ = model(**_UpperCAmelCase ) UpperCAmelCase__ = outputs.logits # verify the logits UpperCAmelCase__ = (1, 10_00) self.assertEqual(logits.shape , _UpperCAmelCase ) UpperCAmelCase__ = np.array([-1.2385, -1.0987, -1.0108] ) self.assertTrue(np.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) UpperCAmelCase__ = 2_81 self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase ) @slow def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ) UpperCAmelCase__ = self.default_image_processor UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = image_processor(images=_UpperCAmelCase , return_tensors="""np""" ) # forward pass UpperCAmelCase__ = model(**_UpperCAmelCase ) UpperCAmelCase__ = outputs.logits # verify the logits UpperCAmelCase__ = (1, 2_18_41) self.assertEqual(logits.shape , _UpperCAmelCase ) UpperCAmelCase__ = np.array([1.6881, -0.2787, 0.5901] ) self.assertTrue(np.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) UpperCAmelCase__ = 23_96 self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
346
import math


def is_prime(number: int) -> bool:
    """Check primality by trial division over odd numbers up to sqrt(number)."""
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Search upward (or downward with desc=True) from factor * value for a prime;
    if the starting point is already prime, the search restarts one above it."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
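A quick usage sketch for the two helpers above, using the function names as reconstructed here (illustrative only):

assert is_prime(2) and is_prime(97) and not is_prime(1)
print(next_prime(14))             # 17 -> the first prime found searching upwards from 14
print(next_prime(14, desc=True))  # 13 -> searching downwards instead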
346
1
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = False UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) UpperCAmelCase__ = TaConfig( vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , ) UpperCAmelCase__ = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): UpperCAmelCase__ = TaBlock(_UpperCAmelCase ) self.encoders.append(_UpperCAmelCase ) UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase ) UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase ) UpperCAmelCase__ = encoder_input_tokens.shape[1] UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(_UpperCAmelCase ) UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase ) # inverted the attention mask UpperCAmelCase__ = encoder_input_tokens.size() UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase ) for lyr in self.encoders: UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0] UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase ) return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
346
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Number of times `term` occurs in `document` (case-insensitive, punctuation stripped)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    """Return (number of documents in the newline-separated `corpus` containing `term`, corpus size)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """log10(n / df), optionally with add-one smoothing of the document frequency."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Term frequency multiplied by inverse document frequency, rounded to 3 decimals."""
    return round(tf * idf, 3)
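A small worked example for the helpers above (names as reconstructed here; the numbers follow directly from the definitions):

corpus = "the cat sat\nthe dog sat\nthe cat ran"
tf = term_frequency("cat", "the cat sat")   # 1
df, n = document_frequency("cat", corpus)   # (2, 3): "cat" appears in 2 of the 3 documents
idf = inverse_document_frequency(df, n)     # round(log10(3 / 2), 3) == 0.176
print(tf_idf(tf, idf))                      # 0.176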
346
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
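A hypothetical usage sketch for the tool above. The class and attribute names are my reconstruction, calling the instance directly assumes the standard callable Tool interface in transformers, and the image path is a placeholder:

from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.jpg")  # placeholder path
print(tool(image, "How many dogs are in the picture?"))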
346
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase_ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase_ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase_ = model.state_dict() UpperCAmelCase_ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase_ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"] UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
346
1
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Dict = (UnCLIPScheduler,) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for time_step in [0, 5_00, 9_99]: for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_UpperCAmelCase , prev_timestep=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(variance_type="""fixed_small_log""" ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5 def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(variance_type="""learned_range""" ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 0.5 assert scheduler._get_variance(1 , predicted_variance=_UpperCAmelCase ) - -10.171_2790 < 1E-5 assert scheduler._get_variance(4_87 , predicted_variance=_UpperCAmelCase ) - -5.799_8052 < 1E-5 assert scheduler._get_variance(9_99 , predicted_variance=_UpperCAmelCase ) - -0.001_0011 < 1E-5 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler.timesteps UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter UpperCAmelCase__ = torch.manual_seed(0 ) for i, t in enumerate(_UpperCAmelCase ): # 1. predict noise residual UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 UpperCAmelCase__ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample UpperCAmelCase__ = pred_prev_sample UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1E-2 assert abs(result_mean.item() - 0.328_4743 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(25 ) UpperCAmelCase__ = scheduler.timesteps UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter UpperCAmelCase__ = torch.manual_seed(0 ) for i, t in enumerate(_UpperCAmelCase ): # 1. predict noise residual UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) if i + 1 == timesteps.shape[0]: UpperCAmelCase__ = None else: UpperCAmelCase__ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 UpperCAmelCase__ = scheduler.step( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , prev_timestep=_UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample UpperCAmelCase__ = pred_prev_sample UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1E-2 assert abs(result_mean.item() - 0.336_2038 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" pass
346
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
**_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( 
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
346
1
'''simple docstring''' from manim import * class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase__ = [mem.copy() for i in range(6 )] UpperCAmelCase__ = [mem.copy() for i in range(6 )] UpperCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) UpperCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) UpperCAmelCase__ = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) UpperCAmelCase__ = Text("""CPU""" , font_size=24 ) UpperCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) UpperCAmelCase__ = [mem.copy() for i in range(4 )] UpperCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) UpperCAmelCase__ = Text("""GPU""" , font_size=24 ) UpperCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) UpperCAmelCase__ = [mem.copy() for i in range(6 )] UpperCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) UpperCAmelCase__ = Text("""Model""" , font_size=24 ) UpperCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) UpperCAmelCase__ = [] for i, rect in enumerate(_UpperCAmelCase ): rect.set_stroke(_UpperCAmelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) UpperCAmelCase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=_UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=_UpperCAmelCase , buff=0.0 ) self.add(_UpperCAmelCase ) cpu_targs.append(_UpperCAmelCase ) UpperCAmelCase__ = [mem.copy() for i in range(6 )] UpperCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) UpperCAmelCase__ = Text("""Loaded Checkpoint""" , font_size=24 ) UpperCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , aligned_edge=_UpperCAmelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) UpperCAmelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase__ = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = MarkupText( f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) UpperCAmelCase__ = MarkupText( f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) , Write(_UpperCAmelCase ) ) self.play(Write(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) ) 
UpperCAmelCase__ = [] UpperCAmelCase__ = [] for i, rect in enumerate(_UpperCAmelCase ): UpperCAmelCase__ = fill.copy().set_fill(_UpperCAmelCase , opacity=0.7 ) target.move_to(_UpperCAmelCase ) first_animations.append(GrowFromCenter(_UpperCAmelCase , run_time=1 ) ) UpperCAmelCase__ = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(_UpperCAmelCase , run_time=1.5 ) ) self.play(*_UpperCAmelCase ) self.play(*_UpperCAmelCase ) self.wait()
346
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
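A minimal sketch of the usual config-first instantiation pattern for the configuration above (the `VivitModel` class name is assumed from the library's standard naming convention):

from transformers import VivitConfig, VivitModel

config = VivitConfig(num_frames=16, hidden_dropout_prob=0.1)
model = VivitModel(config)  # randomly initialised weights with the adjusted config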
346
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase__ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = self.dummy_uncond_unet UpperCAmelCase__ = PNDMScheduler() UpperCAmelCase__ = PNDMPipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) pndm.to(_UpperCAmelCase ) pndm.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase__ = torch.manual_seed(0 ) UpperCAmelCase__ = pndm(generator=_UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" ).images UpperCAmelCase__ = torch.manual_seed(0 ) UpperCAmelCase__ = pndm(generator=_UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=_UpperCAmelCase )[0] UpperCAmelCase__ = image[0, -3:, -3:, -1] UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = """google/ddpm-cifar10-32""" UpperCAmelCase__ = UNetaDModel.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = PNDMScheduler() UpperCAmelCase__ = PNDMPipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) pndm.to(_UpperCAmelCase ) pndm.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCAmelCase__ = torch.manual_seed(0 ) UpperCAmelCase__ = pndm(generator=_UpperCAmelCase , output_type="""numpy""" ).images UpperCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase__ = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    """Deprecated alias kept for backwards compatibility with the old feature-extractor API."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """
    Return the maximum sum of exactly k consecutive elements of `array`,
    using a sliding window so each element is visited only once.

    >>> max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
    24
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window one step right: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for _ in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
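# A quick brute-force cross-check of the sliding-window function above; `max_sum_naive`
# is our own helper for illustration, not part of the original module.
def max_sum_naive(array: list[int], k: int) -> int:
    # O(n * k): recompute every window sum from scratch.
    return max(sum(array[i : i + k]) for i in range(len(array) - k + 1))


if __name__ == "__main__":
    from random import randint

    sample = [randint(-50, 50) for _ in range(200)]
    assert max_sum_in_array(sample, 7) == max_sum_naive(sample, 7)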
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ): """simple docstring""" UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase__ = 3 UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) UpperCAmelCase__ = jieba UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" if self.remove_space: UpperCAmelCase__ = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ = inputs UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase ) UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: UpperCAmelCase__ = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase ) UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) UpperCAmelCase__ = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ = cur_pieces[1:] else: UpperCAmelCase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ): """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , 
already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Count the tile totals t <= t_limit that can form at least one and at most
    n_limit distinct hollow square laminae (an outer square with a centred square hole).
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square to stay centred.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
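# A small brute-force check of the lamina counting above (`brute_force_counts` is our own
# helper): enumerate hollow squares directly for a modest tile limit.
from collections import defaultdict


def brute_force_counts(t_limit: int) -> dict:
    counts = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # the hole keeps the same parity as the outer square
            tiles = outer * outer - hole * hole
            if tiles <= t_limit:
                counts[tiles] += 1
    return counts


if __name__ == "__main__":
    counts = brute_force_counts(100)
    # 32 tiles form exactly two laminae: 9x9 around a 7x7 hole, or 6x6 around a 2x2 hole.
    assert counts[32] == 2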
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ = logging.getLogger(__name__) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) UpperCAmelCase__ = parser.parse_args() return args def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples["""text"""] ) return fn def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ = [] for i in range(len(tokenized_data["""input_ids"""] ) ): UpperCAmelCase__ = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = example.SerializeToString() records.append(SCREAMING_SNAKE_CASE__ ) return records def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit ) UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(SCREAMING_SNAKE_CASE__ : int ): # Concatenate all texts. UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()} UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase__ = { k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ): UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ ) with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file: for i in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase__ = serialized_examples[i] out_file.write(SCREAMING_SNAKE_CASE__ ) print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = parse_args() main(args)
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = torch.device('cpu') def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ = dct.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = val def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' UpperCAmelCase__ = [] for k in state_dict.keys(): UpperCAmelCase__ = k if ".pwconv" in k: UpperCAmelCase__ = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase__ = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." 
in k: UpperCAmelCase__ = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase__ = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase__ = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase__ = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase__ = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase__ = 1000 UpperCAmelCase__ = """huggingface/label-files""" UpperCAmelCase__ = """imagenet-1k-id2label.json""" UpperCAmelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} UpperCAmelCase__ = idalabel UpperCAmelCase__ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase__ = [3, 3, 6, 4] UpperCAmelCase__ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": UpperCAmelCase__ = [3, 3, 9, 6] UpperCAmelCase__ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase__ = [4, 3, 10, 5] UpperCAmelCase__ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase__ = [4, 4, 12, 6] UpperCAmelCase__ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" , check_hash=SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" ) UpperCAmelCase__ = checkpoint UpperCAmelCase__ = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # load HuggingFace model UpperCAmelCase__ = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # prepare test inputs UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase__ = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase__ = get_expected_output(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( 
'--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') UpperCAmelCase_ = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCAmelCase_ = '\\n\n' UpperCAmelCase_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' UpperCAmelCase_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase__ = """cuda""" else: UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.to(_UpperCAmelCase ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase__ = model.config.max_length - 1 else: UpperCAmelCase__ = model.config.max_length UpperCAmelCase__ = tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase ) UpperCAmelCase__ = encodings["""input_ids"""] UpperCAmelCase__ = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase__ = [] UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ): UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) ) UpperCAmelCase__ = encoded_texts[start_index:end_index] UpperCAmelCase__ = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase ) UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_UpperCAmelCase ), attn_mask] , dim=1 ) UpperCAmelCase__ = encoded_batch with torch.no_grad(): UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits UpperCAmelCase__ = out_logits[..., :-1, :].contiguous() UpperCAmelCase__ = labels[..., 1:].contiguous() UpperCAmelCase__ = attn_mask[..., 1:].contiguous() UpperCAmelCase__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
'''simple docstring''' import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem UpperCAmelCase_ = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 UpperCAmelCase_ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if "://" in dataset_path: UpperCAmelCase__ = dataset_path.split("""://""" )[1] return dataset_path def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : fsspec.AbstractFileSystem ): '''simple docstring''' if fs is not None and fs.protocol != "file": return True else: return False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : fsspec.AbstractFileSystem , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = not is_remote_filesystem(SCREAMING_SNAKE_CASE__ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(SCREAMING_SNAKE_CASE__ ) , fs._strip_protocol(SCREAMING_SNAKE_CASE__ ) ) else: fs.mv(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , recursive=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( ): '''simple docstring''' if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = threading.Lock()
def solution(limit: int = 1000000) -> int:
    """
    Sum Euler's totient phi(n) for 2 <= n <= limit with a sieve: start from
    phi[i] = i - 1 and, whenever i turns out to be prime, remove the 1/i share
    from every multiple of i.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: its value was never reduced
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
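# A sanity check of the totient sieve above (the helper name is ours): compare against the
# direct gcd-based definition of phi(n) for a small limit.
from math import gcd


def phi_direct(n: int) -> int:
    # phi(n) counts the integers 1 <= m <= n that are coprime to n.
    return sum(1 for m in range(1, n + 1) if gcd(m, n) == 1)


if __name__ == "__main__":
    limit = 200
    assert solution(limit) == sum(phi_direct(n) for n in range(2, limit + 1))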
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Reads a pyspark.sql.DataFrame through the Spark dataset builder."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        # Streaming mode iterates over the DataFrame lazily; otherwise materialize to disk first.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ): """simple docstring""" UpperCAmelCase__ = {} if top_k is not None: UpperCAmelCase__ = top_k return {}, {}, postprocess_params def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ): """simple docstring""" return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = load_image(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) elif self.framework == "tf": UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
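# A usage sketch for the image-classification pipeline defined above, via the public
# transformers factory function; the checkpoint name is only a common example.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3
)
for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.4f}")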
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
from math import factorial


def solution(n: int = 20) -> int:
    """
    Return the number of monotone lattice paths through an n x n grid,
    i.e. the central binomial coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
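# A cross-check of the closed form above (`lattice_paths_dp` is our own helper): count
# monotone lattice paths through an n x n grid with dynamic programming and compare with
# the central binomial coefficient C(2n, n).
def lattice_paths_dp(n: int) -> int:
    # grid[r][c] = number of right/down paths from the top-left corner to cell (r, c)
    grid = [[1] * (n + 1) for _ in range(n + 1)]
    for r in range(1, n + 1):
        for c in range(1, n + 1):
            grid[r][c] = grid[r - 1][c] + grid[r][c - 1]
    return grid[n][n]


if __name__ == "__main__":
    assert solution(20) == lattice_paths_dp(20) == 137846528820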
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
346
1
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
**_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( 
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
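As a supplementary illustration (not part of the original test file): the loop below sketches how a diffusers PNDMScheduler is typically driven outside the test harness; the random tensors are placeholders standing in for a real UNet's noise prediction.

# Minimal illustrative denoising loop; random tensors are placeholders, not a real model.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)  # placeholder "noisy" sample
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a UNet prediction
    sample = scheduler.step(model_output, t, sample).prev_sample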
346
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
346
1
'''simple docstring''' from math import factorial def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 20 ): '''simple docstring''' UpperCAmelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCAmelCase__ = n // 2 return int(factorial(SCREAMING_SNAKE_CASE__ ) / (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: UpperCAmelCase_ = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
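A small supplementary check (not in the original file): the function computes the central binomial coefficient C(2n, n), i.e. the number of monotone lattice paths through an n x n grid, so it can be cross-checked against math.comb.

# Sanity check added for illustration; values follow directly from C(2n, n).
from math import comb

assert comb(4, 2) == 6                 # a 2x2 grid has 6 lattice paths
assert comb(40, 20) == 137846528820    # expected result for the default n = 20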
346
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
346
1
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' if "cls_token" in name: UpperCAmelCase__ = name.replace("""cls_token""" , """vit.embeddings.cls_token""" ) if "mask_token" in name: UpperCAmelCase__ = name.replace("""mask_token""" , """decoder.mask_token""" ) if "decoder_pos_embed" in name: UpperCAmelCase__ = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: UpperCAmelCase__ = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: UpperCAmelCase__ = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: UpperCAmelCase__ = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" ) if "decoder_blocks" in name: UpperCAmelCase__ = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: UpperCAmelCase__ = name.replace("""blocks""" , """vit.encoder.layer""" ) if "attn.proj" in name: UpperCAmelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: UpperCAmelCase__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: UpperCAmelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: UpperCAmelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: UpperCAmelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCAmelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: UpperCAmelCase__ = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: UpperCAmelCase__ = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: UpperCAmelCase__ = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name: UpperCAmelCase__ = name.replace("""norm.weight""" , """vit.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name: UpperCAmelCase__ = name.replace("""norm.bias""" , """vit.layernorm.bias""" ) return name def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "qkv" in key: UpperCAmelCase__ = key.split(""".""" ) UpperCAmelCase__ = int(key_split[1] ) if "decoder_blocks" in key: UpperCAmelCase__ = config.decoder_hidden_size UpperCAmelCase__ = """decoder.decoder_layers.""" if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[dim : dim * 2, :] UpperCAmelCase__ = val[-dim:, :] elif "bias" in key: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[dim : dim * 2] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = config.hidden_size UpperCAmelCase__ = """vit.encoder.layer.""" if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[dim : dim * 2, :] UpperCAmelCase__ = val[-dim:, :] elif "bias" in key: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[dim : dim * 2] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = val return orig_state_dict def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' 
UpperCAmelCase__ = ViTMAEConfig() if "large" in checkpoint_url: UpperCAmelCase__ = 1024 UpperCAmelCase__ = 4096 UpperCAmelCase__ = 24 UpperCAmelCase__ = 16 elif "huge" in checkpoint_url: UpperCAmelCase__ = 14 UpperCAmelCase__ = 1280 UpperCAmelCase__ = 5120 UpperCAmelCase__ = 32 UpperCAmelCase__ = 16 UpperCAmelCase__ = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""] UpperCAmelCase__ = ViTMAEImageProcessor(size=config.image_size ) UpperCAmelCase__ = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) model.eval() UpperCAmelCase__ = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" UpperCAmelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) UpperCAmelCase__ = ViTMAEImageProcessor(size=config.image_size ) UpperCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) UpperCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = outputs.logits if "large" in checkpoint_url: UpperCAmelCase__ = torch.tensor( [[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] ) elif "huge" in checkpoint_url: UpperCAmelCase__ = torch.tensor( [[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] ) else: UpperCAmelCase__ = torch.tensor( [[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCAmelCase_ = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
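As a usage note added here (not part of the conversion script): once the script has written a folder via --pytorch_dump_folder_path, the exported weights and preprocessor can be reloaded as sketched below; the folder name is only a placeholder.

# Reload the converted checkpoint; "path/to/converted_folder" is a placeholder.
from transformers import ViTMAEForPreTraining, ViTMAEImageProcessor

model = ViTMAEForPreTraining.from_pretrained("path/to/converted_folder")
image_processor = ViTMAEImageProcessor.from_pretrained("path/to/converted_folder")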
346
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
346
1
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer UpperCAmelCase_ = ['bert-base-uncased', 'bert-base-cased'] UpperCAmelCase_ = 'hf-internal-testing/tiny-bert-tf-only' if is_tf_available(): class lowerCAmelCase_ ( tf.keras.Model ): '''simple docstring''' def __init__( self : List[str] , _UpperCAmelCase : List[Any] ): """simple docstring""" super().__init__() UpperCAmelCase__ = tokenizer UpperCAmelCase__ = AutoConfig.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = TFAutoModel.from_config(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.tokenizer(_UpperCAmelCase ) UpperCAmelCase__ = self.bert(**_UpperCAmelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" super().setUp() UpperCAmelCase__ = [ BertTokenizer.from_pretrained(_UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false UpperCAmelCase__ = [TFBertTokenizer.from_pretrained(_UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(_UpperCAmelCase , use_fast_bert_tokenizer=_UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase__ = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] UpperCAmelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): UpperCAmelCase__ = tokenizer(_UpperCAmelCase , return_tensors="""tf""" , padding="""longest""" ) UpperCAmelCase__ = tf_tokenizer(_UpperCAmelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase__ = tf_tokenizer(self.paired_sentences ) UpperCAmelCase__ = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase__ = tf.function(_UpperCAmelCase ) for test_inputs in 
(self.test_sentences, self.paired_sentences): UpperCAmelCase__ = tf.constant(_UpperCAmelCase ) UpperCAmelCase__ = compiled_tokenizer(_UpperCAmelCase ) UpperCAmelCase__ = tf_tokenizer(_UpperCAmelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase__ = ModelToSave(tokenizer=_UpperCAmelCase ) UpperCAmelCase__ = tf.convert_to_tensor(self.test_sentences ) UpperCAmelCase__ = model(_UpperCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase__ = Path(_UpperCAmelCase ) / """saved.model""" model.save(_UpperCAmelCase ) UpperCAmelCase__ = tf.keras.models.load_model(_UpperCAmelCase ) UpperCAmelCase__ = loaded_model(_UpperCAmelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
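A brief usage sketch added for context (not part of the test file): TFBertTokenizer tokenizes entirely inside the TensorFlow graph, which is what the tf.function and saved-model tests above exercise; it requires the tensorflow-text package, and the checkpoint name below is one of those listed in TOKENIZER_CHECKPOINTS.

# In-graph tokenization sketch; requires tensorflow and tensorflow-text.
import tensorflow as tf
from transformers import TFBertTokenizer

tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
batch = tf_tokenizer(tf.constant(["This tokenizer runs inside the TF graph."]))
# batch is a dict of integer tensors (input_ids, attention_mask, ...) usable in a tf.function or SavedModel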
346
'''simple docstring''' from timeit import timeit UpperCAmelCase_ = { 'MALAYALAM': True, 'String': False, 'rotor': True, 'level': True, 'A': True, 'BB': True, 'ABC': False, 'amanaplanacanalpanama': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = 0 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) // 2 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE__ ) <= 2: return True if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return s == s[::-1] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())''' UpperCAmelCase__ = F'''from __main__ import test_data, {name}''' UpperCAmelCase__ = 500000 UpperCAmelCase__ = timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print('a man a plan a canal panama') # finished 500,000 runs in 0.46793 seconds benchmark_function('is_palindrome_slice') # finished 500,000 runs in 0.85234 seconds benchmark_function('is_palindrome') # finished 500,000 runs in 1.32028 seconds benchmark_function('is_palindrome_recursive') # finished 500,000 runs in 2.08679 seconds benchmark_function('is_palindrome_traversal')
346
1
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : List[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[int] ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) self.check_model_type(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = {}, {} if padding is not None: UpperCAmelCase__ = padding if truncation is not None: UpperCAmelCase__ = truncation if top_k is not None: UpperCAmelCase__ = top_k return preprocess_params, {}, postprocess_params def __call__( self : Dict , _UpperCAmelCase : Union["Image.Image", str] , _UpperCAmelCase : str = None , **_UpperCAmelCase : List[str] ): """simple docstring""" if isinstance(_UpperCAmelCase , (Image.Image, str) ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase__ = {"""image""": image, """question""": question} else: UpperCAmelCase__ = image UpperCAmelCase__ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) return results def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : List[Any]=False ): """simple docstring""" UpperCAmelCase__ = load_image(inputs["""image"""] ) UpperCAmelCase__ = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=_UpperCAmelCase , truncation=_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) model_inputs.update(_UpperCAmelCase ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.sigmoid()[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
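For context (an addition, not part of the pipeline module above): this class backs the high-level visual-question-answering pipeline, which can be used roughly as sketched below; the checkpoint name is the commonly used ViLT VQA model and the image path is a placeholder.

# Illustrative pipeline usage; "path/to/image.jpg" is a placeholder.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
predictions = vqa(image="path/to/image.jpg", question="How many animals are in the picture?", top_k=2)
# -> a list of {"score": ..., "answer": ...} dicts, as produced by postprocess() above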
346
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
346
1
'''simple docstring''' import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available UpperCAmelCase_ = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : '''simple docstring''' lowerCAmelCase_ : str lowerCAmelCase_ : List[str] lowerCAmelCase_ : Optional[List[str]] @dataclass class lowerCAmelCase_ : '''simple docstring''' lowerCAmelCase_ : List[int] lowerCAmelCase_ : List[int] lowerCAmelCase_ : Optional[List[int]] = None lowerCAmelCase_ : Optional[List[int]] = None class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : List[Any] = """train""" lowerCAmelCase_ : Optional[Any] = """dev""" lowerCAmelCase_ : Optional[Any] = """test""" class lowerCAmelCase_ : '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[Split, str] ): """simple docstring""" raise NotImplementedError @staticmethod def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase : str ): """simple docstring""" raise NotImplementedError @staticmethod def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase : List[InputExample] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Tuple="[CLS]" , _UpperCAmelCase : Any=1 , _UpperCAmelCase : Optional[Any]="[SEP]" , _UpperCAmelCase : Any=False , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : str=0 , _UpperCAmelCase : List[Any]=-1_00 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : Union[str, Any]=True , ): """simple docstring""" UpperCAmelCase__ = {label: i for i, label in enumerate(_UpperCAmelCase )} UpperCAmelCase__ = [] for ex_index, example in enumerate(_UpperCAmelCase ): if ex_index % 1_00_00 == 0: logger.info("""Writing example %d of %d""" , _UpperCAmelCase , len(_UpperCAmelCase ) ) UpperCAmelCase__ = [] UpperCAmelCase__ = [] for word, label in zip(example.words , example.labels ): UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_UpperCAmelCase ) > 0: tokens.extend(_UpperCAmelCase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_UpperCAmelCase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. UpperCAmelCase__ = tokenizer.num_special_tokens_to_add() if len(_UpperCAmelCase ) > max_seq_length - special_tokens_count: UpperCAmelCase__ = tokens[: (max_seq_length - special_tokens_count)] UpperCAmelCase__ = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. 
# # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] UpperCAmelCase__ = [sequence_a_segment_id] * len(_UpperCAmelCase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: UpperCAmelCase__ = [cls_token] + tokens UpperCAmelCase__ = [pad_token_label_id] + label_ids UpperCAmelCase__ = [cls_token_segment_id] + segment_ids UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. UpperCAmelCase__ = [1 if mask_padding_with_zero else 0] * len(_UpperCAmelCase ) # Zero-pad up to the sequence length. UpperCAmelCase__ = max_seq_length - len(_UpperCAmelCase ) if pad_on_left: UpperCAmelCase__ = ([pad_token] * padding_length) + input_ids UpperCAmelCase__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask UpperCAmelCase__ = ([pad_token_segment_id] * padding_length) + segment_ids UpperCAmelCase__ = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length assert len(_UpperCAmelCase ) == max_seq_length if ex_index < 5: logger.info("""*** Example ***""" ) logger.info("""guid: %s""" , example.guid ) logger.info("""tokens: %s""" , """ """.join([str(_UpperCAmelCase ) for x in tokens] ) ) logger.info("""input_ids: %s""" , """ """.join([str(_UpperCAmelCase ) for x in input_ids] ) ) logger.info("""input_mask: %s""" , """ """.join([str(_UpperCAmelCase ) for x in input_mask] ) ) logger.info("""segment_ids: %s""" , """ """.join([str(_UpperCAmelCase ) for x in segment_ids] ) ) logger.info("""label_ids: %s""" , """ """.join([str(_UpperCAmelCase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: UpperCAmelCase__ = None features.append( InputFeatures( input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , label_ids=_UpperCAmelCase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : List[InputFeatures] lowerCAmelCase_ : int = nn.CrossEntropyLoss().ignore_index def __init__( self : Union[str, Any] , _UpperCAmelCase : TokenClassificationTask , _UpperCAmelCase : str , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Split = Split.train , ): """simple docstring""" UpperCAmelCase__ = os.path.join( _UpperCAmelCase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
UpperCAmelCase__ = cached_features_file + """.lock""" with FileLock(_UpperCAmelCase ): if os.path.exists(_UpperCAmelCase ) and not overwrite_cache: logger.info(f'''Loading features from cached file {cached_features_file}''' ) UpperCAmelCase__ = torch.load(_UpperCAmelCase ) else: logger.info(f'''Creating features from dataset file at {data_dir}''' ) UpperCAmelCase__ = token_classification_task.read_examples_from_file(_UpperCAmelCase , _UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers UpperCAmelCase__ = token_classification_task.convert_examples_to_features( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f'''Saving features into cached file {cached_features_file}''' ) torch.save(self.features , _UpperCAmelCase ) def __len__( self : Tuple ): """simple docstring""" return len(self.features ) def __getitem__( self : Union[str, Any] , _UpperCAmelCase : Any ): """simple docstring""" return self.features[i] if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ : '''simple docstring''' lowerCAmelCase_ : List[InputFeatures] lowerCAmelCase_ : int = -100 def __init__( self : Any , _UpperCAmelCase : TokenClassificationTask , _UpperCAmelCase : str , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Split = Split.train , ): """simple docstring""" UpperCAmelCase__ = token_classification_task.read_examples_from_file(_UpperCAmelCase , _UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers UpperCAmelCase__ = token_classification_task.convert_examples_to_features( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: UpperCAmelCase__ = tf.data.Dataset.from_generator( _UpperCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , ( {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: UpperCAmelCase__ = tf.data.Dataset.from_generator( _UpperCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , ( { """input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] ), """token_type_ids""": tf.TensorShape([None] ), }, 
tf.TensorShape([None] ), ) , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : Optional[Any] ): """simple docstring""" return len(self.features ) def __getitem__( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" return self.features[i]
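# Hedged sketch of the label-alignment rule used in convert_examples_to_features above
# (the helper name is hypothetical): only the first sub-token of every word keeps the real
# label id, the remaining sub-tokens get pad_token_label_id (-100, the CrossEntropyLoss
# ignore index) so they are skipped by the loss.
def align_labels_with_subtokens(word_subtokens, word_label_ids, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for subtokens, label_id in zip(word_subtokens, word_label_ids):
        if not subtokens:  # some tokenizers return [] for a bare whitespace "word"
            continue
        tokens.extend(subtokens)
        label_ids.extend([label_id] + [pad_token_label_id] * (len(subtokens) - 1))
    return tokens, label_ids


# "Washington is" with "Washington" split into two word pieces:
print(align_labels_with_subtokens([["Wash", "##ington"], ["is"]], [3, 0]))
# (['Wash', '##ington', 'is'], [3, -100, 0])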
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
'''simple docstring''' from __future__ import annotations from typing import Any class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = num_of_nodes UpperCAmelCase__ = [] UpperCAmelCase__ = {} def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ): """simple docstring""" self.m_edges.append([u_node, v_node, weight] ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" if self.m_component[u_node] != u_node: for k in self.m_component: UpperCAmelCase__ = self.find_component(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : list[int] , _UpperCAmelCase : int , _UpperCAmelCase : int ): """simple docstring""" if component_size[u_node] <= component_size[v_node]: UpperCAmelCase__ = v_node component_size[v_node] += component_size[u_node] self.set_component(_UpperCAmelCase ) elif component_size[u_node] >= component_size[v_node]: UpperCAmelCase__ = self.find_component(_UpperCAmelCase ) component_size[u_node] += component_size[v_node] self.set_component(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = 0 UpperCAmelCase__ = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCAmelCase__ = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = edge UpperCAmelCase__ = self.m_component[u] UpperCAmelCase__ = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): UpperCAmelCase__ = [u, v, w] for edge in minimum_weight_edge: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = edge UpperCAmelCase__ = self.m_component[u] UpperCAmelCase__ = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' ) num_of_components -= 1 UpperCAmelCase__ = [-1] * self.m_num_of_nodes print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' ) def _UpperCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
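# Hedged stand-alone sketch of the Boruvka-style minimum spanning tree construction the
# class above implements (names here are illustrative; the original keeps, per component,
# the cheapest outgoing edge and unions components until only one remains). Assumes a
# connected graph.
def boruvka_mst_weight(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [None] * num_nodes  # cheapest edge leaving each component root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                ru, rv = find(u), find(v)
                if ru != rv:
                    parent[ru] = rv
                    mst_weight += w
                    components -= 1
    return mst_weight


# Triangle 0-1 (1), 1-2 (2), 0-2 (3): the MST keeps the two cheapest edges.
print(boruvka_mst_weight(3, [(0, 1, 1), (1, 2, 2), (0, 2, 3)]))  # 3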
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = False UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) UpperCAmelCase__ = TaConfig( vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , ) UpperCAmelCase__ = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): UpperCAmelCase__ = TaBlock(_UpperCAmelCase ) self.encoders.append(_UpperCAmelCase ) UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase ) UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase ) UpperCAmelCase__ = encoder_input_tokens.shape[1] UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(_UpperCAmelCase ) UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase ) # inverted the attention mask UpperCAmelCase__ = encoder_input_tokens.size() UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase ) for lyr in self.encoders: UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0] UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase ) return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : List[str] = ["""flax"""] def __init__( self : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[int] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Dict ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : str = ["""flax"""] def __init__( self : Optional[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : Dict ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *_UpperCAmelCase : Any , **_UpperCAmelCase : str ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : List[Any] = ["""flax"""] def __init__( self : List[Any] , *_UpperCAmelCase : Any , **_UpperCAmelCase : List[Any] ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = ["""flax"""] def __init__( self : int , *_UpperCAmelCase : str , **_UpperCAmelCase : List[str] ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Tuple ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *_UpperCAmelCase : Any , **_UpperCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = ["""flax"""] def __init__( self : Dict , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : str ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *_UpperCAmelCase : Dict , **_UpperCAmelCase : List[Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Tuple ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : List[str] = ["""flax"""] def __init__( self : List[str] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : int ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod 
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Dict = ["""flax"""] def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Tuple ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : int ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Any ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : str = ["""flax"""] def __init__( self : Optional[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : int ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Dict ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = ["""flax"""] def __init__( self : Optional[int] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Tuple ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int , *_UpperCAmelCase : Dict , **_UpperCAmelCase : List[str] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = ["""flax"""] def __init__( self : Tuple , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Dict , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : List[str] = ["""flax"""] def __init__( self : List[str] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : List[Any] ): """simple 
docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[int] = ["""flax"""] def __init__( self : Union[str, Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : int ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Dict ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Any = ["""flax"""] def __init__( self : Optional[int] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : int ): """simple docstring""" requires_backends(self , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ): """simple docstring""" requires_backends(cls , ["""flax"""] ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ): """simple docstring""" requires_backends(cls , ["""flax"""] )
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase__ = name.split(""".""" ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): '''simple docstring''' if config_path is not None: UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaConfig() if is_seq_class: UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = idalabel UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) 
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" ) UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
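# Hedged command-line sketch (script name and paths are placeholders; the flags are the
# ones defined by the argparse block above). Converting a fine-tuned fairseq wav2vec2
# checkpoint to the Hugging Face format might look like:
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/hf_wav2vec2
#
# Add --not_finetuned to convert a pre-training (self-supervised) checkpoint, or
# --is_seq_class to convert a sequence-classification fine-tune, in which case
# --dict_path is expected to point at the label map read by the read_txt_into_dict helper.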
'''simple docstring''' from importlib import import_module from .logging import get_logger UpperCAmelCase_ = get_logger(__name__) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=None ): """simple docstring""" UpperCAmelCase__ = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) ) UpperCAmelCase__ = module._original_module if isinstance(_UpperCAmelCase , _PatchedModuleObj ) else module class lowerCAmelCase_ : '''simple docstring''' lowerCAmelCase_ : str = [] def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple=None ): """simple docstring""" UpperCAmelCase__ = obj UpperCAmelCase__ = target UpperCAmelCase__ = new UpperCAmelCase__ = target.split(""".""" )[0] UpperCAmelCase__ = {} UpperCAmelCase__ = attrs or [] def __enter__( self : Tuple ): """simple docstring""" *UpperCAmelCase__ , UpperCAmelCase__ = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(_UpperCAmelCase ) ): try: UpperCAmelCase__ = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): UpperCAmelCase__ = getattr(self.obj , _UpperCAmelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(_UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): UpperCAmelCase__ = obj_attr # patch at top level setattr(self.obj , _UpperCAmelCase , _PatchedModuleObj(_UpperCAmelCase , attrs=self.attrs ) ) UpperCAmelCase__ = getattr(self.obj , _UpperCAmelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(_UpperCAmelCase , _UpperCAmelCase , _PatchedModuleObj(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , attrs=self.attrs ) ) UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) # finally set the target attribute setattr(_UpperCAmelCase , _UpperCAmelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: UpperCAmelCase__ = getattr(import_module(""".""".join(_UpperCAmelCase ) ) , _UpperCAmelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , _UpperCAmelCase ) is attr_value: UpperCAmelCase__ = getattr(self.obj , _UpperCAmelCase ) setattr(self.obj , _UpperCAmelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" UpperCAmelCase__ = globals()["""__builtins__"""][target_attr] setattr(self.obj , _UpperCAmelCase , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self : str , *_UpperCAmelCase : int ): """simple docstring""" for attr in list(self.original ): setattr(self.obj , _UpperCAmelCase , self.original.pop(_UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" self.__enter__() self._active_patches.append(self ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
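# Hedged usage sketch of the attribute patcher defined above (in the `datasets` library
# this class is known as `patch_submodule`; the import path and the module being patched
# are assumptions made for illustration).
#
#   from datasets.utils.patching import patch_submodule
#
#   def fake_join(*parts):
#       return "::".join(parts)
#
#   # Temporarily replace "os.path.join" *as seen from* `some_module`, covering both
#   # `import os` style access and aliases like `from os.path import join as pjoin`;
#   # the original attribute is restored on __exit__.
#   with patch_submodule(some_module, "os.path.join", fake_join):
#       ...
#
# The start()/stop() methods give the same effect without a `with` block, tracking the
# patch on a class-level list of active patches until stopped.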
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: UpperCAmelCase__ = [] UpperCAmelCase__ = Counter() UpperCAmelCase__ = 0 UpperCAmelCase__ = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: UpperCAmelCase__ = candidate + """\n""" + test_case UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): UpperCAmelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCAmelCase__ , UpperCAmelCase__ = [], [] for result in results.values(): result.sort() UpperCAmelCase__ = [r[1]["""passed"""] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = k UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
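# Hedged numeric sketch of the unbiased pass@k estimator used above:
# pass@k = 1 - C(n - c, k) / C(n, k), evaluated in the numerically stable product form
# 1 - prod_{i = n-c+1}^{n} (1 - k / i), where n is the number of sampled completions per
# task and c the number that pass the unit tests.
import numpy as np


def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))


print(pass_at_k(n=4, c=1, k=1))  # 0.25 -- one of four samples passes
print(pass_at_k(n=4, c=1, k=4))  # 1.0  -- drawing all four samples always includes the pass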
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase_ = logging.get_logger(__name__) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(SCREAMING_SNAKE_CASE__ ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : str = ["""pixel_values"""] def __init__( self : Any , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : List[Any] , ): """simple docstring""" super().__init__(**_UpperCAmelCase ) UpperCAmelCase__ = size if size is not None else {"""shortest_edge""": 2_56} UpperCAmelCase__ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} UpperCAmelCase__ = get_size_dict(_UpperCAmelCase , param_name="""crop_size""" ) UpperCAmelCase__ = do_resize UpperCAmelCase__ = size UpperCAmelCase__ = do_center_crop UpperCAmelCase__ = crop_size UpperCAmelCase__ = resample UpperCAmelCase__ = do_rescale UpperCAmelCase__ = rescale_factor UpperCAmelCase__ = offset UpperCAmelCase__ = do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ): """simple docstring""" UpperCAmelCase__ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" in size: UpperCAmelCase__ = get_resize_output_image_size(_UpperCAmelCase , size["""shortest_edge"""] , default_to_square=_UpperCAmelCase ) elif "height" in size and "width" in size: UpperCAmelCase__ = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : str , ): """simple docstring""" UpperCAmelCase__ = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(_UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ): """simple docstring""" UpperCAmelCase__ = image.astype(np.floataa ) if offset: UpperCAmelCase__ = image - (scale / 2) return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ): """simple docstring""" return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
UpperCAmelCase__ = to_numpy_array(_UpperCAmelCase ) if do_resize: UpperCAmelCase__ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) if do_center_crop: UpperCAmelCase__ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase ) if do_rescale: UpperCAmelCase__ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase , offset=_UpperCAmelCase ) if do_normalize: UpperCAmelCase__ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) UpperCAmelCase__ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) return image def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Any , ): """simple docstring""" UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ = resample if resample is not None else self.resample UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ = offset if offset is not None else self.offset UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ = image_std if image_std is not None else self.image_std UpperCAmelCase__ = size if size is not None else self.size UpperCAmelCase__ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ = get_size_dict(_UpperCAmelCase , param_name="""crop_size""" ) if not valid_images(_UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) UpperCAmelCase__ = make_batched(_UpperCAmelCase ) UpperCAmelCase__ = [ [ self._preprocess_image( image=_UpperCAmelCase , do_resize=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , do_center_crop=_UpperCAmelCase , crop_size=_UpperCAmelCase , do_rescale=_UpperCAmelCase , rescale_factor=_UpperCAmelCase , offset=_UpperCAmelCase , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , data_format=_UpperCAmelCase , ) for img in video ] for video in videos ] UpperCAmelCase__ = {"""pixel_values""": videos} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
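# Hedged usage sketch (illustrative): the processor above accepts one video -- a list of
# frames -- or a batch of videos; `preprocess` resizes, center-crops, rescales (optionally
# applying the `offset` shift) and normalizes every frame, returning a BatchFeature whose
# "pixel_values" are shaped (batch, num_frames, channels, height, width).
import numpy as np

# eight 240x320 RGB frames of a single synthetic video (HWC, uint8)
video = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)]

# The class name is obfuscated above, so the call is sketched with a placeholder:
#   processor = <video image processor defined above>()
#   inputs = processor(video, return_tensors="np")
#   print(inputs["pixel_values"].shape)   # expected: (1, 8, 3, 224, 224) with the defaults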
'''simple docstring''' import math def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False UpperCAmelCase__ = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=1 , **SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = factor * value UpperCAmelCase__ = value while not is_prime(SCREAMING_SNAKE_CASE__ ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **SCREAMING_SNAKE_CASE__ ) return value
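# A minimal standalone sketch of the trial-division primality check and next-prime search
# implemented by the snippet above. The names here (is_prime, next_prime) are illustrative;
# the dumped snippet's own identifiers are machine-renamed and may not resolve as written.
import math


def is_prime(number: int) -> bool:
    # 2 and 3 are prime; negatives, 0, 1 and even numbers are not
    if number in (2, 3):
        return True
    if number < 2 or number % 2 == 0:
        return False
    # trial division by odd candidates up to sqrt(number)
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))


def next_prime(value: int, factor: int = 1) -> int:
    # scan upward from factor * value until a prime is found (simplified: no descending mode)
    candidate = factor * value
    while not is_prime(candidate):
        candidate += 1
    return candidate


print(is_prime(17), next_prime(14))  # True 17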
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase_ = abspath(join(dirname(dirname(dirname(__file__))), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main UpperCAmelCase__ = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(SCREAMING_SNAKE_CASE__ , id=SCREAMING_SNAKE_CASE__ )
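# A minimal sketch of the pytest plumbing the conftest above relies on, assuming the standard
# hook names pytest_addoption and pytest_terminal_summary. The --make-reports option name is
# taken from the snippet; the report-writing behaviour shown here is illustrative only.
# conftest.py -- register a custom CLI flag and act on it in the terminal summary
def pytest_addoption(parser):
    parser.addoption(
        "--make-reports",
        action="store",
        default=None,
        help="generate report files; the value is used as an id prefix",
    )


def pytest_terminal_summary(terminalreporter, exitstatus, config):
    make_reports = config.getoption("--make-reports")
    if make_reports:
        terminalreporter.write_line(f"would write reports with id: {make_reports}")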
'''simple docstring''' import string from math import logaa def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = document.translate( str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" ) UpperCAmelCase__ = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = corpus.lower().translate( str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with '' UpperCAmelCase__ = corpus_without_punctuation.split("""\n""" ) UpperCAmelCase__ = term.lower() return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE__ )) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False ): '''simple docstring''' if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) , 3 ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' return round(tf * idf , 3 )
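# A small worked example of the tf / idf / tf-idf arithmetic implemented above, using plain
# floats so it runs standalone. Function bodies are simplified (no punctuation stripping);
# names and the toy corpus are illustrative, not part of the snippet's API.
from math import log10

# term frequency: raw count of the term in one document
doc = "the cat sat on the mat"
term = "the"
tf = doc.split().count(term)  # -> 2

# document frequency: how many newline-separated corpus documents contain the term
corpus = "the cat sat\na dog ran\nthe sun rose"
df = sum(term in d for d in corpus.lower().split("\n"))  # -> 2
n = len(corpus.split("\n"))  # -> 3

idf = round(log10(n / df), 3)  # log10(3/2) ~= 0.176
print(tf, df, n, idf, round(tf * idf, 3))  # 2 2 3 0.176 0.352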
'''simple docstring''' from __future__ import annotations UpperCAmelCase_ = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : dict[str, list[str]] , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = graph # mapping node to its parent in resulting breadth first tree UpperCAmelCase__ = {} UpperCAmelCase__ = source_vertex def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = {self.source_vertex} UpperCAmelCase__ = None UpperCAmelCase__ = [self.source_vertex] # first in first out queue while queue: UpperCAmelCase__ = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(_UpperCAmelCase ) UpperCAmelCase__ = vertex queue.append(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : str ): """simple docstring""" if target_vertex == self.source_vertex: return self.source_vertex UpperCAmelCase__ = self.parent.get(_UpperCAmelCase ) if target_vertex_parent is None: UpperCAmelCase__ = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(_UpperCAmelCase ) return self.shortest_path(_UpperCAmelCase ) + f'''->{target_vertex}''' if __name__ == "__main__": UpperCAmelCase_ = Graph(graph, 'G') g.breath_first_search() print(g.shortest_path('D')) print(g.shortest_path('G')) print(g.shortest_path('Foo'))
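# A minimal standalone sketch of the breadth-first-search parent tree and path reconstruction
# that the Graph class above implements. Names and the small example graph are illustrative.
from collections import deque


def bfs_parents(graph: dict[str, list[str]], source: str) -> dict[str, str]:
    # breadth-first search recording each vertex's parent in the resulting BFS tree
    parent, visited, queue = {}, {source}, deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbour in graph[vertex]:
            if neighbour not in visited:
                visited.add(neighbour)
                parent[neighbour] = vertex
                queue.append(neighbour)
    return parent


def shortest_path(parent: dict[str, str], source: str, target: str) -> str:
    # walk the parent pointers back from target to source
    if target == source:
        return source
    if target not in parent:
        raise ValueError(f"No path from {source} to {target}")
    return shortest_path(parent, source, parent[target]) + f"->{target}"


graph = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
parents = bfs_parents(graph, "A")
print(shortest_path(parents, "A", "D"))  # A->B->D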
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase_ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase_ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase_ = model.state_dict() UpperCAmelCase_ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase_ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"] UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
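# A minimal sketch of the state-dict surgery pattern the distillation script above performs:
# copy a subset of teacher layers into consecutively renumbered student keys. Plain dicts are
# used instead of torch tensors so it runs without dependencies; key strings mirror the script
# but the values and layer choice are illustrative.
teacher_sd = {
    f"bert.encoder.layer.{i}.attention.self.query.weight": f"W{i}" for i in range(4)
}

compressed_sd = {}
std_idx = 0
for teacher_idx in [0, 2]:
    # remap teacher layer index -> consecutive student layer index
    compressed_sd[
        f"bert.encoder.layer.{std_idx}.attention.self.query.weight"
    ] = teacher_sd[f"bert.encoder.layer.{teacher_idx}.attention.self.query.weight"]
    std_idx += 1

# student layer 0 now holds teacher layer 0, student layer 1 holds teacher layer 2
print(compressed_sd)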
'''simple docstring''' import math def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = [True] * n UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): UpperCAmelCase__ = i * 2 while index < n: UpperCAmelCase__ = False UpperCAmelCase__ = index + i UpperCAmelCase__ = [2] for i in range(3 , SCREAMING_SNAKE_CASE__ , 2 ): if is_prime[i]: primes.append(SCREAMING_SNAKE_CASE__ ) return primes def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 999966663333 ): '''simple docstring''' UpperCAmelCase__ = math.floor(math.sqrt(SCREAMING_SNAKE_CASE__ ) ) + 100 UpperCAmelCase__ = prime_sieve(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 UpperCAmelCase__ = primes[prime_index] while (last_prime**2) <= limit: UpperCAmelCase__ = primes[prime_index + 1] UpperCAmelCase__ = last_prime**2 UpperCAmelCase__ = next_prime**2 # Get numbers divisible by lps(current) UpperCAmelCase__ = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) UpperCAmelCase__ = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps UpperCAmelCase__ = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair UpperCAmelCase__ = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
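# A standalone sketch of the sieve-of-Eratosthenes helper the Project Euler solution above
# builds on; the function name and the small limit are illustrative.
def prime_sieve(n: int) -> list[int]:
    # classic sieve: mark every multiple of each prime as composite
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, n, i):
                is_prime[j] = False
    return [i for i, flag in enumerate(is_prime) if flag]


print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]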
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
**_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( 
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
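# A hedged sketch of the save/reload round trip the scheduler tests above exercise, assuming
# the diffusers PNDMScheduler API that the snippet itself calls (set_timesteps, save_config,
# from_pretrained). Treat the constructor arguments as illustrative defaults, not a recipe.
import tempfile

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)                       # writes scheduler_config.json
    reloaded = PNDMScheduler.from_pretrained(tmpdir)    # rebuild the scheduler from that config
    reloaded.set_timesteps(10)

# the reloaded scheduler should reproduce the same timestep schedule
assert torch.equal(scheduler.timesteps, reloaded.timesteps)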
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Tuple = """deta""" lowerCAmelCase_ : Any = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Optional[int] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Tuple=9_00 , _UpperCAmelCase : Any=20_48 , _UpperCAmelCase : Optional[int]=6 , _UpperCAmelCase : Dict=20_48 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : Optional[Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]="relu" , _UpperCAmelCase : Dict=2_56 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : str=1.0 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Optional[int]="sine" , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=3_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : str=0.25 , **_UpperCAmelCase : Dict , ): """simple docstring""" if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) UpperCAmelCase__ = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase__ = backbone_config.pop("""model_type""" ) UpperCAmelCase__ = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase__ = config_class.from_dict(_UpperCAmelCase ) UpperCAmelCase__ = backbone_config UpperCAmelCase__ = num_queries UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = d_model UpperCAmelCase__ = encoder_ffn_dim UpperCAmelCase__ = encoder_layers UpperCAmelCase__ = encoder_attention_heads UpperCAmelCase__ = decoder_ffn_dim UpperCAmelCase__ = decoder_layers UpperCAmelCase__ = decoder_attention_heads UpperCAmelCase__ = dropout UpperCAmelCase__ = attention_dropout UpperCAmelCase__ = activation_dropout UpperCAmelCase__ = activation_function UpperCAmelCase__ = init_std UpperCAmelCase__ = init_xavier_std UpperCAmelCase__ = encoder_layerdrop UpperCAmelCase__ = auxiliary_loss UpperCAmelCase__ = position_embedding_type # deformable attributes UpperCAmelCase__ = num_feature_levels UpperCAmelCase__ = encoder_n_points UpperCAmelCase__ = decoder_n_points UpperCAmelCase__ = two_stage UpperCAmelCase__ = two_stage_num_proposals UpperCAmelCase__ = with_box_refine UpperCAmelCase__ = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher UpperCAmelCase__ = class_cost UpperCAmelCase__ = bbox_cost UpperCAmelCase__ = giou_cost # Loss coefficients UpperCAmelCase__ = mask_loss_coefficient UpperCAmelCase__ = dice_loss_coefficient UpperCAmelCase__ = bbox_loss_coefficient UpperCAmelCase__ = giou_loss_coefficient UpperCAmelCase__ = eos_coefficient UpperCAmelCase__ = focal_alpha super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase ) @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" return self.d_model def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ = self.backbone_config.to_dict() UpperCAmelCase__ = self.__class__.model_type return output
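# A hedged sketch of instantiating the composite configuration defined above through the
# upstream transformers classes it mirrors (DetaConfig with a nested ResNet backbone config).
# Assumes a transformers release that ships DetaConfig and ResNetConfig; argument values are
# illustrative.
from transformers import DetaConfig, ResNetConfig

backbone = ResNetConfig(out_features=["stage2", "stage3", "stage4"])
config = DetaConfig(
    backbone_config=backbone,
    num_queries=900,
    two_stage=True,
    with_box_refine=True,  # required when two_stage is True, per the check above
)

# to_dict() serialises the nested backbone config alongside the DETA fields
d = config.to_dict()
print(d["model_type"], d["backbone_config"]["model_type"])  # deta resnet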
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[int] = """vivit""" def __init__( self : List[str] , _UpperCAmelCase : List[Any]=2_24 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Any=[2, 16, 16] , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : Optional[Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu_fast" , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : List[Any]=1E-06 , _UpperCAmelCase : List[str]=True , **_UpperCAmelCase : List[Any] , ): """simple docstring""" UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = initializer_range UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = image_size UpperCAmelCase__ = num_frames UpperCAmelCase__ = tubelet_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = qkv_bias super().__init__(**_UpperCAmelCase )
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCAmelCase__ = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCAmelCase__ = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_INIT_CONFIGURATION __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = ElectraTokenizer def __init__( self : Dict , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : str="[UNK]" , __UpperCAmelCase : Any="[SEP]" , __UpperCAmelCase : str="[PAD]" , __UpperCAmelCase : Optional[Any]="[CLS]" , __UpperCAmelCase : Union[str, Any]="[MASK]" , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Optional[int] , ) ->str: """simple docstring""" super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) a = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars ): a = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) ) a = do_lower_case a = strip_accents a = tokenize_chinese_chars a = normalizer_class(**__UpperCAmelCase ) a = do_lower_case def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple=None ) ->str: """simple docstring""" a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]: """simple docstring""" a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]: """simple docstring""" a = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
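# A pure-Python sketch of the special-token layout behind build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences above: [CLS] A [SEP] B [SEP] with segment ids 0/1. The
# token ids are made up for illustration; a real tokenizer supplies them.
CLS, SEP = 101, 102
sentence_a = [2023, 2003]  # "this is"
sentence_b = [1037, 3231]  # "a test"

# pair input: [CLS] A [SEP] B [SEP]
input_ids = [CLS] + sentence_a + [SEP] + sentence_b + [SEP]

# segment ids: 0 for [CLS] A [SEP], 1 for B [SEP]
token_type_ids = [0] * (len(sentence_a) + 2) + [1] * (len(sentence_b) + 1)

print(input_ids)        # [101, 2023, 2003, 102, 1037, 3231, 102]
print(token_type_ids)   # [0, 0, 0, 0, 1, 1, 1]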
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
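# A minimal sketch of the deprecation-shim pattern used above: subclass the replacement class
# and emit a FutureWarning from __init__. Class names here are illustrative, not the
# transformers classes themselves.
import warnings


class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept only for backwards compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OldFeatureExtractor(size=384)

print(extractor.size, caught[0].category.__name__)  # 384 FutureWarning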
'''simple docstring''' import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Optional[Any]=None , snake_case_ : Any=None , snake_case_ : Optional[Any]=None , snake_case_ : Dict=None , snake_case_ : List[Any]=None , ) -> Optional[Any]: '''simple docstring''' if attention_mask is None: UpperCAmelCase_ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: UpperCAmelCase_ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: UpperCAmelCase_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ ) if decoder_head_mask is None: UpperCAmelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ ) if cross_attn_head_mask is None: UpperCAmelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class __A : def __init__(self : Optional[int] , __a : List[str] , __a : str=13 , __a : Optional[Any]=7 , __a : Tuple=True , __a : Optional[Any]=False , __a : Any=99 , __a : int=16 , __a : Optional[Any]=2 , __a : List[Any]=4 , __a : Dict=4 , __a : List[Any]="relu" , __a : List[Any]=0.1 , __a : Any=0.1 , __a : int=0.0 , __a : Any=0.0 , __a : Optional[Any]=20 , __a : List[Any]=2 , __a : Tuple=1 , __a : Any=0 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = encoder_layerdrop UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = eos_token_id UpperCAmelCase_ = pad_token_id UpperCAmelCase_ = bos_token_id def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = self.eos_token_id # Eos Token UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect 
seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input UpperCAmelCase_ = input_ids.clamp(self.pad_token_id + 1 ) UpperCAmelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 ) UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = prepare_mam_aaa_inputs_dict(__a , __a , __a ) return config, inputs_dict def _lowercase (self : Optional[Any] ): return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def _lowercase (self : str ): UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs() return config, inputs_dict def _lowercase (self : int , __a : int , __a : Union[str, Any] ): UpperCAmelCase_ = MaMaaaModel(config=__a ).get_decoder().to(__a ).eval() UpperCAmelCase_ = inputs_dict["input_ids"] UpperCAmelCase_ = inputs_dict["attention_mask"] UpperCAmelCase_ = inputs_dict["head_mask"] # first forward pass UpperCAmelCase_ = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ = model(__a , attention_mask=__a )["last_hidden_state"] UpperCAmelCase_ = model(__a , attention_mask=__a , past_key_values=__a )[ "last_hidden_state" ] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-2 ) ) def _lowercase (self : Optional[Any] , __a : Any , __a : Optional[int] ): UpperCAmelCase_ = MaMaaaModel(config=__a ).to(__a ).eval() UpperCAmelCase_ = model(**__a ) UpperCAmelCase_ = outputs.encoder_last_hidden_state UpperCAmelCase_ = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = model.get_encoder() encoder.save_pretrained(__a ) UpperCAmelCase_ = MaMaaaEncoder.from_pretrained(__a ).to(__a ) UpperCAmelCase_ = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = model.get_decoder() decoder.save_pretrained(__a ) UpperCAmelCase_ = MaMaaaDecoder.from_pretrained(__a ).to(__a ) UpperCAmelCase_ = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , 
encoder_hidden_states=__a , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : List[Any] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) a__ : int = (MaMaaaForConditionalGeneration,) if is_torch_available() else () a__ : Optional[int] = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) a__ : Union[str, Any] = True a__ : Tuple = True a__ : List[str] = False a__ : Union[str, Any] = False def _lowercase (self : Any , __a : Optional[Any] , __a : Dict , __a : int , __a : str , __a : str ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def _lowercase (self : Dict ): UpperCAmelCase_ = MaMaaaModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a ) def _lowercase (self : int ): self.config_tester.run_common_tests() def _lowercase (self : List[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a ) UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(__a , output_loading_info=__a ) self.assertEqual(info["missing_keys"] , [] ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a ) def _lowercase (self : Dict ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__a ) def _lowercase (self : List[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): UpperCAmelCase_ = model_class(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = copy.deepcopy(self._prepare_for_class(__a , __a ) ) if not self.is_encoder_decoder: UpperCAmelCase_ = inputs["input_ids"] del inputs["input_ids"] else: UpperCAmelCase_ = inputs["input_ids"] UpperCAmelCase_ = inputs.get("decoder_input_ids" , __a ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , __a ) UpperCAmelCase_ = model.get_input_embeddings() if not self.is_encoder_decoder: UpperCAmelCase_ = wte(__a ) else: UpperCAmelCase_ = wte(__a ) UpperCAmelCase_ = wte(__a ) with torch.no_grad(): model(**__a )[0] def _lowercase (self : int ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = MaMaaaForConditionalGeneration(__a ).eval().to(__a ) if torch_device == "cuda": model.half() model.generate(__a , attention_mask=__a ) model.generate(num_beams=4 , do_sample=__a , early_stopping=__a , num_return_sequences=3 ) def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Union[str, Any]: '''simple docstring''' return 
torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ ) SCREAMING_SNAKE_CASE_: Any =1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class __A ( unittest.TestCase ): @cached_property def _lowercase (self : Optional[int] ): return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__a ) UpperCAmelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) UpperCAmelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) UpperCAmelCase_ = prepare_mam_aaa_inputs_dict(model.config , __a , __a ) with torch.no_grad(): UpperCAmelCase_ = model(**__a )[0] UpperCAmelCase_ = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , __a ) # change to expected output here UpperCAmelCase_ = torch.tensor( [[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=__a ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) ) def _lowercase (self : str ): UpperCAmelCase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__a ) # change to intended input UpperCAmelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) UpperCAmelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) UpperCAmelCase_ = prepare_mam_aaa_inputs_dict(model.config , __a , __a ) with torch.no_grad(): UpperCAmelCase_ = model(**__a )[0] UpperCAmelCase_ = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , __a ) # change to expected output here UpperCAmelCase_ = torch.tensor( [[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=__a ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__a ) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) UpperCAmelCase_ = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams UpperCAmelCase_ = tokenizer(__a , padding=__a , return_tensors="pt" ) UpperCAmelCase_ = model.generate( input_ids=dct["input_ids"].to(__a ) , attention_mask=dct["attention_mask"].to(__a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) UpperCAmelCase_ = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] UpperCAmelCase_ = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__a , skip_special_tokens=__a ) assert generated == expected_en
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ): """simple docstring""" UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase__ = 3 UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) UpperCAmelCase__ = jieba UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" if self.remove_space: UpperCAmelCase__ = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ = inputs UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase ) UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: UpperCAmelCase__ = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase ) UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) UpperCAmelCase__ = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ = cur_pieces[1:] else: UpperCAmelCase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ): """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , 
already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCamelCase : List[str] = logging.get_logger(__name__) lowerCamelCase : List[str] = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = """codegen""" lowerCAmelCase__ : Union[str, Any] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__(self : Any , UpperCamelCase : List[Any]=50400 , UpperCamelCase : Optional[Any]=2048 , UpperCamelCase : List[Any]=2048 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Union[str, Any]=28 , UpperCamelCase : Optional[Any]=16 , UpperCamelCase : Dict=64 , UpperCamelCase : Tuple=None , UpperCamelCase : Optional[int]="gelu_new" , UpperCamelCase : str=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : int=1E-5 , UpperCamelCase : str=0.02 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=50256 , UpperCamelCase : Dict=50256 , UpperCamelCase : int=False , **UpperCamelCase : Optional[int] , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = n_ctx lowercase__ = n_positions lowercase__ = n_embd lowercase__ = n_layer lowercase__ = n_head lowercase__ = n_inner lowercase__ = rotary_dim lowercase__ = activation_function lowercase__ = resid_pdrop lowercase__ = embd_pdrop lowercase__ = attn_pdrop lowercase__ = layer_norm_epsilon lowercase__ = initializer_range lowercase__ = use_cache lowercase__ = bos_token_id lowercase__ = eos_token_id super().__init__( bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , tie_word_embeddings=UpperCamelCase , **UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Union[str, Any] , UpperCamelCase : PretrainedConfig , UpperCamelCase : str = "default" , UpperCamelCase : List[PatchingSpec] = None , UpperCamelCase : bool = False , ): '''simple docstring''' 
super().__init__(UpperCamelCase , task=UpperCamelCase , patching_specs=UpperCamelCase , use_past=UpperCamelCase ) if not getattr(self._config , '''pad_token_id''' , UpperCamelCase ): # TODO: how to do that better? lowercase__ = 0 @property def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase , direction='''inputs''' ) lowercase__ = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__ = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def UpperCamelCase__ (self : Any ): '''simple docstring''' return self._config.n_layer @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return self._config.n_head def UpperCamelCase__ (self : List[Any] , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowercase__ = super(UpperCamelCase , self ).generate_dummy_inputs( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) # We need to order the input in the way they appears in the forward() lowercase__ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__ ,lowercase__ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__ = seqlen + 2 lowercase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__ = [ (torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(self.num_layers ) ] lowercase__ = common_inputs['''attention_mask'''] if self.use_past: lowercase__ = ordered_inputs['''attention_mask'''].dtype lowercase__ = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 ) return ordered_inputs @property def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' return 13
2
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ = logging.getLogger(__name__) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) UpperCAmelCase__ = parser.parse_args() return args def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples["""text"""] ) return fn def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ = [] for i in range(len(tokenized_data["""input_ids"""] ) ): UpperCAmelCase__ = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = example.SerializeToString() records.append(SCREAMING_SNAKE_CASE__ ) return records def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit ) UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(SCREAMING_SNAKE_CASE__ : int ): # Concatenate all texts. UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()} UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase__ = { k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ): UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ ) with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file: for i in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase__ = serialized_examples[i] out_file.write(SCREAMING_SNAKE_CASE__ ) print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = parse_args() main(args)
346
0
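For readability, here is a minimal, de-obfuscated sketch of the fixed-length chunking step used in the TFRecord-preparation script above; the function name and the max_length value are illustrative rather than taken from the snippet.

def group_texts(examples, max_length=512):
    # Concatenate every column (input_ids, attention_mask, ...) into one long list,
    # then re-split it into chunks of exactly max_length tokens, dropping the remainder.
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    total_length = (total_length // max_length) * max_length
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }

# Two tokenized rows regrouped into fixed-length chunks of 4:
print(group_texts({"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}, max_length=4))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]}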
'''simple docstring'''
import requests
from bs4 import BeautifulSoup  # the "bsa" import in the original does not exist; bs4 is the intended package


def world_covid_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the Worldometers front page and return {statistic name: value}."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid_stats().items():
        print(f"{key}\n{value}\n")
3
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCAmelCase_ = '\\n\n' UpperCAmelCase_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' UpperCAmelCase_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase__ = """cuda""" else: UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.to(_UpperCAmelCase ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase__ = model.config.max_length - 1 else: UpperCAmelCase__ = model.config.max_length UpperCAmelCase__ = tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase ) UpperCAmelCase__ = encodings["""input_ids"""] UpperCAmelCase__ = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase__ = [] UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ): UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) ) UpperCAmelCase__ = encoded_texts[start_index:end_index] UpperCAmelCase__ = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase ) UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_UpperCAmelCase ), attn_mask] , dim=1 ) UpperCAmelCase__ = encoded_batch with torch.no_grad(): UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits UpperCAmelCase__ = out_logits[..., :-1, :].contiguous() UpperCAmelCase__ = labels[..., 1:].contiguous() UpperCAmelCase__ = attn_mask[..., 1:].contiguous() UpperCAmelCase__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
346
0
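As a clearer reference for the metric above, this is a minimal sketch of the per-sequence perplexity it computes: the exponential of the mean token-level negative log-likelihood, with padded positions masked out. The function name, shapes, and random inputs are illustrative, not part of the metric.

import torch
from torch.nn import CrossEntropyLoss

def batch_perplexity(logits, labels, attn_mask):
    loss_fct = CrossEntropyLoss(reduction="none")
    shift_logits = logits[..., :-1, :].contiguous()   # position t predicts token t+1
    shift_labels = labels[..., 1:].contiguous()
    shift_mask = attn_mask[..., 1:].contiguous()
    nll = loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_mask
    return torch.exp(nll.sum(1) / shift_mask.sum(1))  # exp(mean NLL) per sequence

logits = torch.randn(2, 5, 100)                       # (batch, seq_len, vocab)
labels = torch.randint(0, 100, (2, 5))
mask = torch.ones(2, 5, dtype=torch.int64)
print(batch_perplexity(logits, labels, mask))         # tensor of shape (2,)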
'''simple docstring''' from __future__ import annotations import math def a_ ( lowerCamelCase : float , lowerCamelCase : int ): lowerCAmelCase = u for i in range(1 , lowerCamelCase ): lowerCAmelCase = temp * (u - i) return temp def a_ ( ): lowerCAmelCase = int(input('enter the numbers of values: ' ) ) lowerCAmelCase = [] for _ in range(lowerCamelCase ): y.append([] ) for i in range(lowerCamelCase ): for j in range(lowerCamelCase ): y[i].append(lowerCamelCase ) lowerCAmelCase = 0 print('enter the values of parameters in a list: ' ) lowerCAmelCase = list(map(lowerCamelCase , input().split() ) ) print('enter the values of corresponding parameters: ' ) for i in range(lowerCamelCase ): lowerCAmelCase = float(input() ) lowerCAmelCase = int(input('enter the value to interpolate: ' ) ) lowerCAmelCase = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , lowerCamelCase ): for j in range(n - i ): lowerCAmelCase = y[j + 1][i - 1] - y[j][i - 1] lowerCAmelCase = y[0][0] for i in range(1 , lowerCamelCase ): summ += (ucal(lowerCamelCase , lowerCamelCase ) * y[0][i]) / math.factorial(lowerCamelCase ) print(f'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
4
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, computed with a sieve."""
    phi = [i - 1 for i in range(limit + 1)]  # phi[p] stays p - 1 exactly when p is prime
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: remove its share from every multiple
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
346
0
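The interpolation script above reads its data interactively; here is a self-contained sketch of the same Newton forward-difference method with a worked example (equally spaced x values assumed; the function name is illustrative).

from math import factorial

def newton_forward(x, y, value):
    n = len(x)
    table = [y[:]]                                    # forward-difference table, one row per order
    for i in range(1, n):
        table.append([table[i - 1][j + 1] - table[i - 1][j] for j in range(n - i)])
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = table[0][0], 1.0
    for i in range(1, n):
        u_term *= u - (i - 1)                         # u (u - 1) ... (u - i + 1)
        total += u_term * table[i][0] / factorial(i)
    return total

# f(x) = x^2 sampled at 0..3; interpolating at 1.5 recovers 2.25 exactly.
print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))  # 2.25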
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that can form between 1 and n_limit hollow square laminae."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole and the outer square must have the same parity
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
5
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ): """simple docstring""" UpperCAmelCase__ = {} if top_k is not None: UpperCAmelCase__ = top_k return {}, {}, postprocess_params def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ): """simple docstring""" return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = load_image(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) elif self.framework == "tf": UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
346
0
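A minimal usage sketch for the image-classification pipeline defined above; the checkpoint name and image URL are illustrative and not taken from the snippet.

from transformers import pipeline

# Illustrative checkpoint; any image-classification model on the Hub should work here.
classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3
)
for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.3f}")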
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A( a , a , unittest.TestCase ): snake_case_ = VQModel snake_case_ = '''sample''' @property def SCREAMING_SNAKE_CASE_ ( self , _snake_case=(32, 32) ) -> Union[str, Any]: '''simple docstring''' __a = 4 __a = 3 __a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case ) return {"sample": image} @property def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' return (3, 32, 32) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' return (3, 32, 32) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 3, } __a = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a , __a = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_snake_case ) __a = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(_snake_case ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) __a = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) __a = image.to(_snake_case ) with torch.no_grad(): __a = model(_snake_case ).sample __a = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __a = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
6
'''simple docstring'''
from math import factorial


def solution(n: int = 20) -> int:
    """Number of monotone lattice paths through an n x n grid, i.e. C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
346
0
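The snippet above computes the number of monotone lattice paths through an n x n grid, i.e. the central binomial coefficient C(2n, n); since Python 3.8, math.comb gives the same result without the factorial division. The function name below is illustrative.

from math import comb

def lattice_paths(n: int = 20) -> int:
    return comb(2 * n, n)   # C(2n, n) == (2n)! / (n! * n!)

print(lattice_paths(20))    # 137846528820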
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = "▁" lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} lowercase_ = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } lowercase_ = {"vinai/bartpho-syllable": 1024} class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['input_ids', 'attention_mask'] def __init__( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[int]="<s>",lowercase_ : List[Any]="</s>",lowercase_ : Any="</s>",lowercase_ : Tuple="<s>",lowercase_ : Optional[int]="<unk>",lowercase_ : str="<pad>",lowercase_ : Any="<mask>",lowercase_ : Optional[Dict[str, Any]] = None,**lowercase_ : Optional[Any],)-> None: '''simple docstring''' A__ = AddedToken(lowercase_,lstrip=lowercase_,rstrip=lowercase_ ) if isinstance(lowercase_,lowercase_ ) else mask_token A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowercase_,eos_token=lowercase_,unk_token=lowercase_,sep_token=lowercase_,cls_token=lowercase_,pad_token=lowercase_,mask_token=lowercase_,sp_model_kwargs=self.sp_model_kwargs,**lowercase_,) A__ = vocab_file A__ = monolingual_vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility A__ = {} A__ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(lowercase_ ) not in self.fairseq_tokens_to_ids: A__ = cnt cnt += 1 with open(lowercase_,'r',encoding='utf-8' ) as f: for line in f.readlines(): A__ = line.strip().split()[0] A__ = len(self.fairseq_tokens_to_ids ) if str(lowercase_ ) not in self.fairseq_tokens_to_ids: A__ = len(self.fairseq_tokens_to_ids ) A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Optional[int] )-> Optional[int]: '''simple docstring''' A__ = self.__dict__.copy() A__ = None A__ = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any],lowercase_ : Optional[int] )-> str: '''simple docstring''' A__ = d # for backward compatibility if not hasattr(self,'sp_model_kwargs' ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def snake_case__ ( self : Optional[Any],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ = [self.cls_token_id] A__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case__ ( self : int,lowercase_ : List[int],lowercase_ : Optional[List[int]] = None,lowercase_ : bool = False )-> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_,token_ids_a=lowercase_,already_has_special_tokens=lowercase_ ) if token_ids_a is None: 
return [1] + ([0] * len(lowercase_ )) + [1] return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1] def snake_case__ ( self : Any,lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def snake_case__ ( self : int )-> Union[str, Any]: '''simple docstring''' A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case__ ( self : Dict,lowercase_ : str )-> List[str]: '''simple docstring''' return self.sp_model.encode(lowercase_,out_type=lowercase_ ) def snake_case__ ( self : List[str],lowercase_ : int )-> Optional[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def snake_case__ ( self : Dict,lowercase_ : str )-> List[Any]: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def snake_case__ ( self : List[Any],lowercase_ : List[str] )-> str: '''simple docstring''' A__ = ''.join(lowercase_ ).replace(lowercase_,' ' ).strip() return out_string def snake_case__ ( self : List[str],lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowercase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join( lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_,'wb' ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( lowercase_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file,lowercase_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(lowercase_,'w',encoding='utf-8' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F'{str(lowercase_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
7
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
346
0
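A small, readable sketch of the sequence-pair formatting that build_inputs_with_special_tokens implements in the tokenizer above (single sequence: <s> A </s>; pair: <s> A </s></s> B </s>); the token ids used in the example are illustrative.

def build_inputs(token_ids_a, token_ids_b=None, cls_id=0, sep_id=2):
    # cls_id/sep_id stand in for <s> and </s>; real values come from the tokenizer's vocab.
    if token_ids_b is None:
        return [cls_id] + token_ids_a + [sep_id]
    return [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]

print(build_inputs([10, 11]))        # [0, 10, 11, 2]
print(build_inputs([10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]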
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowerCAmelCase_ = 25_60_47 lowerCAmelCase_ = 25_61_45 @require_sentencepiece @require_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = NllbTokenizer SCREAMING_SNAKE_CASE : Any = NllbTokenizerFast SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Union[str, Any] = {} def snake_case__( self : List[str] ) ->Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = NllbTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__( self : List[str] ) ->int: snake_case_ = NllbTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) snake_case_ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__( self : List[str] ) ->Any: snake_case_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one 
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) snake_case_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=True snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase ) snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) # Save tokenizer rust, legacy_format=False snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase ) snake_case_ = tokenizer_p.save_pretrained(_UpperCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(_UpperCamelCase ) snake_case_ = tokenizer_p.from_pretrained(_UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) ) shutil.rmtree(_UpperCamelCase ) @require_torch def snake_case__( self : Any ) ->Optional[Any]: if not self.test_seqaseq: return snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
snake_case_ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for''' ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons''' ''' will only worsen the violence and misery for millions of people.''', ] snake_case_ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al''' ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi''' ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] try: snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCamelCase , tgt_texts=_UpperCamelCase , max_length=3 , max_target_length=1_0 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 1_0 ) # max_target_length will default to max_length if not specified snake_case_ = tokenizer.prepare_seqaseq_batch( _UpperCamelCase , tgt_texts=_UpperCamelCase , max_length=3 , return_tensors='''pt''' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCamelCase , max_length=3 , max_target_length=1_0 , return_tensors='''pt''' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('''decoder_input_ids''' , _UpperCamelCase ) @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' ) def snake_case__( self : List[str] ) ->Tuple: pass def snake_case__( self : Any ) ->Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = [AddedToken('''<special>''' , lstrip=_UpperCamelCase )] snake_case_ = self.rust_tokenizer_class.from_pretrained( _UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = tokenizer_r.encode('''Hey this is a <special> token''' ) snake_case_ = tokenizer_r.encode('''<special>''' , add_special_tokens=_UpperCamelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: snake_case_ = self.rust_tokenizer_class.from_pretrained( _UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = self.tokenizer_class.from_pretrained( _UpperCamelCase , additional_special_tokens=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = tokenizer_p.encode('''Hey this is a <special> token''' ) snake_case_ = tokenizer_cr.encode('''Hey this is a <special> token''' ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class snake_case_ ( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = "facebook/nllb-200-distilled-600M" SCREAMING_SNAKE_CASE : int = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year 
conflict and more weapons will only worsen the violence and misery for millions of people.", ] SCREAMING_SNAKE_CASE : Dict = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] SCREAMING_SNAKE_CASE : Dict = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def snake_case__( cls : Optional[int] ) ->int: snake_case_ = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' ) snake_case_ = 1 return cls def snake_case__( self : Optional[int] ) ->Any: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 2_5_6_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 2_5_6_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 2_5_6_0_5_7 ) def snake_case__( self : str ) ->Tuple: snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _UpperCamelCase ) def snake_case__( self : Any ) ->List[str]: self.assertIn(_UpperCamelCase , self.tokenizer.all_special_ids ) # fmt: off snake_case_ = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7] # fmt: on snake_case_ = self.tokenizer.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCamelCase ) def snake_case__( self : Dict ) ->str: snake_case_ = ['''this is gunna be a long sentence ''' * 2_0] assert isinstance(src_text[0] , _UpperCamelCase ) snake_case_ = 1_0 snake_case_ = self.tokenizer(_UpperCamelCase , max_length=_UpperCamelCase , truncation=_UpperCamelCase ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , _UpperCamelCase ) self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) def snake_case__( self : Tuple ) ->Union[str, Any]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_6_2_0_3, 3] ) def snake_case__( self : Dict ) ->List[Any]: snake_case_ = tempfile.mkdtemp() snake_case_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_UpperCamelCase ) snake_case_ = NllbTokenizer.from_pretrained(_UpperCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCamelCase ) @require_torch def snake_case__( self : str ) ->Dict: snake_case_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) snake_case_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) self.assertEqual((2, 1_5) , batch.input_ids.shape ) self.assertEqual((2, 1_5) , batch.attention_mask.shape ) snake_case_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , batch.decoder_input_ids[0, 0] ) # 
EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def snake_case__( self : List[Any] ) ->Any: snake_case_ = self.tokenizer(self.src_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=3 , return_tensors='''pt''' ) snake_case_ = self.tokenizer( text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_0 , return_tensors='''pt''' ) snake_case_ = targets['''input_ids'''] snake_case_ = shift_tokens_right( _UpperCamelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def snake_case__( self : Optional[int] ) ->Union[str, Any]: snake_case_ = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( nested_simplify(_UpperCamelCase ) , { # A, test, EOS, en_XX '''input_ids''': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 2_5_6_0_5_7, } , ) @require_torch def snake_case__( self : List[str] ) ->str: snake_case_ = True snake_case_ = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] ) snake_case_ = False snake_case_ = self.tokenizer( '''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' ) self.assertEqual( inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
8
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Any = np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ ) else: __SCREAMING_SNAKE_CASE : List[Any] = np.full((len(lowercase__ ), sequence_length) , lowercase__ ) for i, tensor in enumerate(lowercase__ ): if padding_side == "right": if isinstance(lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Dict = tensor[:sequence_length] else: __SCREAMING_SNAKE_CASE : Optional[Any] = tensor[:sequence_length] else: if isinstance(lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : List[Any] = tensor[:sequence_length] else: __SCREAMING_SNAKE_CASE : Optional[int] = tensor[:sequence_length] return out_tensor.tolist() def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Dict = ord(lowercase__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __SCREAMING_SNAKE_CASE : List[str] = unicodedata.category(lowercase__ ) if cat.startswith('''P''' ): return True return False @dataclass class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : PreTrainedTokenizerBase SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True SCREAMING_SNAKE_CASE__ : Optional[int] = None SCREAMING_SNAKE_CASE__ : Optional[int] = None SCREAMING_SNAKE_CASE__ : int = -100 SCREAMING_SNAKE_CASE__ : str = "pt" def __magic_name__( self :Tuple , lowerCAmelCase__ :Any ) -> List[Any]: import torch __SCREAMING_SNAKE_CASE : int = '''label''' if '''label''' in features[0].keys() else '''labels''' __SCREAMING_SNAKE_CASE : List[str] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(batch['''entity_ids'''] ).shape[1] __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.padding_side if padding_side == "right": __SCREAMING_SNAKE_CASE : Optional[int] = [ list(lowerCAmelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) for label in labels ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) + list(lowerCAmelCase__ ) for label in labels ] __SCREAMING_SNAKE_CASE : Optional[Any] = [feature['''ner_tags'''] for feature in features] __SCREAMING_SNAKE_CASE : Optional[int] = padding_tensor(lowerCAmelCase__ , -1 , lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = [feature['''original_entity_spans'''] for feature in features] __SCREAMING_SNAKE_CASE : int = padding_tensor(lowerCAmelCase__ , (-1, -1) , lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = {k: torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
import datasets __A = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n" __A = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n" __A = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n" def lowerCAmelCase_ ( __a , __a ) -> List[str]: """simple docstring""" return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"), }) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]) ->Union[str, Any]: '''simple docstring''' return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_)}
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCAmelCase__ = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCAmelCase__ = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCAmelCase__ = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCAmelCase__ = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCAmelCase__ = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCAmelCase__ = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S 
KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCAmelCase__ = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _UpperCAmelCase (): _A , _A : Dict = randrange(len(UpperCamelCase__ ) ), randrange(len(UpperCamelCase__ ) ) _A : Union[str, Any] = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] _A , _A : Union[str, Any] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _UpperCAmelCase (UpperCamelCase__ : int = 100 ): return (generate_random_hand() for _ in range(UpperCamelCase__ )) @pytest.mark.parametrize("hand, expected" , UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ): assert PokerHand(UpperCamelCase__ )._is_flush() == expected @pytest.mark.parametrize("hand, expected" , UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : List[str] ): assert PokerHand(UpperCamelCase__ )._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values" , UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ): _A : Optional[Any] = PokerHand(UpperCamelCase__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected" , UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ): assert PokerHand(UpperCamelCase__ )._is_same_kind() == expected @pytest.mark.parametrize("hand, expected" , UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ): assert PokerHand(UpperCamelCase__ )._hand_type == expected @pytest.mark.parametrize("hand, other, expected" , UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ): assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected @pytest.mark.parametrize("hand, other, expected" , generate_random_hands() ) def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Any ): assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected def _UpperCAmelCase (): _A : Optional[Any] = [PokerHand(UpperCamelCase__ ) for hand in SORTED_HANDS] _A : List[str] = poker_hands.copy() shuffle(UpperCamelCase__ ) _A : Union[str, Any] = chain(sorted(UpperCamelCase__ ) ) for index, hand in enumerate(UpperCamelCase__ ): assert hand == poker_hands[index] def _UpperCAmelCase (): # Test that five high straights are compared correctly. _A : Optional[int] = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )] pokerhands.sort(reverse=UpperCamelCase__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _UpperCAmelCase (): # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
_A : List[str] = PokerHand("2C 4S AS 3D 5C" ) _A : List[Any] = True _A : Tuple = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _UpperCAmelCase (): # Problem number 54 from Project Euler # Testing from poker_hands.txt file _A : str = 0 _A : List[str] = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _A : Tuple = os.path.join(UpperCamelCase__ , "poker_hands.txt" ) with open(UpperCamelCase__ ) as file_hand: for line in file_hand: _A : Tuple = line[:14].strip() _A : Dict = line[15:].strip() _A , _A : Any = PokerHand(UpperCamelCase__ ), PokerHand(UpperCamelCase__ ) _A : Union[str, Any] = player.compare_with(UpperCamelCase__ ) if output == "Win": answer += 1 assert answer == 376
'''simple docstring''' from timeit import timeit UpperCAmelCase_ = { 'MALAYALAM': True, 'String': False, 'rotor': True, 'level': True, 'A': True, 'BB': True, 'ABC': False, 'amanaplanacanalpanama': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = 0 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) // 2 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE__ ) <= 2: return True if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return s == s[::-1] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())''' UpperCAmelCase__ = F'''from __main__ import test_data, {name}''' UpperCAmelCase__ = 500000 UpperCAmelCase__ = timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print('a man a plan a canal panama') # finished 500,000 runs in 0.46793 seconds benchmark_function('is_palindrome_slice') # finished 500,000 runs in 0.85234 seconds benchmark_function('is_palindrome') # finished 500,000 runs in 1.32028 seconds benchmark_function('is_palindrome_recursive') # finished 500,000 runs in 2.08679 seconds benchmark_function('is_palindrome_traversal')
import numpy as np


def lowerCamelCase__(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential linear unit (ELU): returns x where x > 0, and alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
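# Quick usage sketch (added for illustration; the sample values are arbitrary).
# For positive inputs ELU is the identity; negative inputs decay smoothly towards -alpha.
print(lowerCamelCase__(np.array([-2.0, -0.5, 0.0, 1.5]), alpha=0.3))
# approximately [-0.2594 -0.1181  0.      1.5   ]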
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
def solution():
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000), Project Euler problem 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 19_01
    sundays = 0
    while year < 20_01:
        day += 7
        if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 20_01 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
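# Independent cross-check (added sketch, not part of the original solution):
# the same count via the standard library; weekday() returns Monday == 0, so Sunday == 6.
from datetime import date

assert solution() == sum(
    date(year, month, 1).weekday() == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)  # both evaluate to 171, the answer to Project Euler problem 19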
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
from __future__ import annotations


class BoyerMooreSearch:
    """Boyer-Moore string search using the bad character heuristic."""

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Index in the text of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = False UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) UpperCAmelCase__ = TaConfig( vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , ) UpperCAmelCase__ = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): UpperCAmelCase__ = TaBlock(_UpperCAmelCase ) self.encoders.append(_UpperCAmelCase ) UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase ) UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase ) UpperCAmelCase__ = encoder_input_tokens.shape[1] UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(_UpperCAmelCase ) UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase ) # inverted the attention mask UpperCAmelCase__ = encoder_input_tokens.size() UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase ) for lyr in self.encoders: UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0] UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase ) return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE :Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Union[str, Any] = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[Any] = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :Optional[Any] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE :str = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE :Dict = 
_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase__ = name.split(""".""" ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): '''simple docstring''' if config_path is not None: UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaConfig() if is_seq_class: UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = idalabel UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) 
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" ) UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
346
0
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __A ( A_ ,A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} lowerCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) lowerCAmelCase : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) lowercase__ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) torch.manual_seed(0 ) lowercase__ : Any = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) torch.manual_seed(0 ) lowercase__ : List[str] = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) lowercase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) torch.manual_seed(0 ) lowercase__ : Optional[Any] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) lowercase__ : List[Any] = CLIPTextModel(_snake_case ) lowercase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__ : Dict = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCAmelCase ( self : Any ,_snake_case : List[Any] ,_snake_case : Any=0 ) -> Any: """simple docstring""" if str(_snake_case ).startswith('''mps''' ): lowercase__ : Optional[Any] = torch.manual_seed(_snake_case ) else: lowercase__ : str = 
torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowercase__ : List[Any] = 2 lowercase__ : Optional[int] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,) lowercase__ : str = floats_tensor(control_image.shape ,rng=random.Random(_snake_case ) ).to(_snake_case ) lowercase__ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0] lowercase__ : Optional[Any] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((64, 64) ) lowercase__ : Union[str, Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCAmelCase ( self : Any ) -> str: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Dict = StableDiffusionControlNetImgaImgPipeline lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" torch.manual_seed(0 ) lowercase__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) torch.manual_seed(0 ) def init_weights(_snake_case : Optional[int] ): if isinstance(_snake_case ,torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowercase__ : Any = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowercase__ : Any = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowercase__ : Dict = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) lowercase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) torch.manual_seed(0 ) lowercase__ : List[Any] = CLIPTextConfig( 
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) lowercase__ : int = CLIPTextModel(_snake_case ) lowercase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__ : int = MultiControlNetModel([controlneta, controlneta] ) lowercase__ : Optional[Any] = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Union[str, Any]=0 ) -> List[Any]: """simple docstring""" if str(_snake_case ).startswith('''mps''' ): lowercase__ : int = torch.manual_seed(_snake_case ) else: lowercase__ : Dict = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowercase__ : int = 2 lowercase__ : Optional[Any] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,), ] lowercase__ : Dict = floats_tensor(control_image[0].shape ,rng=random.Random(_snake_case ) ).to(_snake_case ) lowercase__ : Dict = image.cpu().permute(0 ,2 ,3 ,1 )[0] lowercase__ : Optional[int] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((64, 64) ) lowercase__ : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" lowercase__ : Dict = self.get_dummy_components() lowercase__ : Dict = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) lowercase__ : Optional[Any] = 10.0 lowercase__ : Tuple = 4 lowercase__ : Dict = self.get_dummy_inputs(_snake_case ) lowercase__ : Optional[Any] = steps lowercase__ : Any = scale lowercase__ : Optional[Any] = pipe(**_snake_case )[0] lowercase__ : List[str] = self.get_dummy_inputs(_snake_case ) lowercase__ : Optional[int] = steps lowercase__ : int = scale lowercase__ : List[str] = pipe(**_snake_case ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0] lowercase__ : int = self.get_dummy_inputs(_snake_case ) lowercase__ : Optional[int] = steps lowercase__ : Dict = scale lowercase__ : Dict = pipe(**_snake_case ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0] lowercase__ : Dict = self.get_dummy_inputs(_snake_case ) lowercase__ : List[Any] = steps lowercase__ : Optional[int] = scale lowercase__ : List[Any] = pipe(**_snake_case ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def UpperCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) 
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def UpperCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" lowercase__ : Union[str, Any] = self.get_dummy_components() lowercase__ : Optional[Any] = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(_snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Any ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : int = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) lowercase__ : Any = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' ,safety_checker=_snake_case ,controlnet=_snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_snake_case ) lowercase__ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ : List[str] = '''evil space-punk bird''' lowercase__ : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) ) lowercase__ : Tuple = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) ) lowercase__ : List[Any] = pipe( _snake_case ,_snake_case ,control_image=_snake_case ,generator=_snake_case ,output_type='''np''' ,num_inference_steps=50 ,strength=0.6 ,) lowercase__ : List[Any] = output.images[0] assert image.shape == (512, 512, 3) lowercase__ : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9e-2
16
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: UpperCAmelCase__ = [] UpperCAmelCase__ = Counter() UpperCAmelCase__ = 0 UpperCAmelCase__ = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: UpperCAmelCase__ = candidate + """\n""" + test_case UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): UpperCAmelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCAmelCase__ , UpperCAmelCase__ = [], [] for result in results.values(): result.sort() UpperCAmelCase__ = [r[1]["""passed"""] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = k UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
346
0
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def _A ( UpperCamelCase_ : Optional[Any]="ro", UpperCamelCase_ : str="en", UpperCamelCase_ : Union[str, Any]="wmt16", UpperCamelCase_ : Tuple=None) -> None: '''simple docstring''' try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("run pip install datasets") __lowercase = F"""{src_lang}-{tgt_lang}""" print(F"""Converting {dataset}-{pair}""") __lowercase = datasets.load_dataset(UpperCamelCase_, UpperCamelCase_) if save_dir is None: __lowercase = F"""{dataset}-{pair}""" __lowercase = Path(UpperCamelCase_) save_dir.mkdir(exist_ok=UpperCamelCase_) for split in ds.keys(): print(F"""Splitting {split} with {ds[split].num_rows} records""") # to save to val.source, val.target like summary datasets __lowercase = "val" if split == "validation" else split __lowercase = save_dir.joinpath(F"""{fn}.source""") __lowercase = save_dir.joinpath(F"""{fn}.target""") __lowercase = src_path.open("w+") __lowercase = tgt_path.open("w+") # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split]): __lowercase = x["translation"] src_fp.write(ex[src_lang] + "\n") tgt_fp.write(ex[tgt_lang] + "\n") print(F"""Saved {dataset} dataset to {save_dir}""") if __name__ == "__main__": fire.Fire(download_wmt_dataset)
17
'''simple docstring'''
import math


def is_prime(number: int):
    '''Return True if ``number`` is prime, using trial division over odd candidates.'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    '''Search from ``factor * value`` for a prime (upward by default, downward if kwargs["desc"] is True).
    If the starting value is itself prime, restart the search from value + 1.'''
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
346
0
from math import pow, sqrt def _snake_case ( *lowerCAmelCase : float ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = len(lowerCAmelCase ) > 0 and all(value > 0.0 for value in values ) return result def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(lowerCAmelCase , lowerCAmelCase ) else ValueError("Input Error: Molar mass values must greater than 0." ) ) def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ): """simple docstring""" return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) )
18
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    '''Count occurrences of ``term`` in ``document`` (case-insensitive, punctuation removed).'''
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    '''Return (number of documents containing ``term``, total number of documents).
    Documents in ``corpus`` are separated by newlines.'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    '''Compute idf = log10(n / df), or the smoothed variant 1 + log10(n / (1 + df)).'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    '''Combine a term frequency and an inverse document frequency into a tf-idf score.'''
    return round(tf * idf, 3)
346
0
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ): lowerCAmelCase__ = BioGptTokenizer lowerCAmelCase__ = False def SCREAMING_SNAKE_CASE_( self ) -> int: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) ) lowerCamelCase_ = ["l o 123", "lo w 1456", "e r</w> 1789", ""] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(lowercase ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(lowercase ) ) def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any: lowerCamelCase_ = "lower newer" lowerCamelCase_ = "lower newer" return input_text, output_text def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: lowerCamelCase_ = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCamelCase_ = "lower" lowerCamelCase_ = ["low", "er</w>"] lowerCamelCase_ = tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) lowerCamelCase_ = tokens + ["<unk>"] lowerCamelCase_ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) @slow def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=lowercase ) lowerCamelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
19
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase_ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase_ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase_ = model.state_dict() UpperCAmelCase_ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase_ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"] UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
346
0
import functools


def mincost_tickets(days, costs) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index) -> int:
        # Minimum cost to cover all travel days from `index` (1..365) onwards,
        # choosing between a 1-day, 7-day or 30-day pass on each travel day.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
20
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
**_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( 
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
346
0
def naive_cut_rod_recursive(n, prices):
    # Exponential-time recursion: best revenue for a rod of length n.
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue


def top_down_cut_rod(n, prices):
    # Memoized (top-down) dynamic programming entry point.
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n, prices, max_rev):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n, prices):
    # Iterative (bottom-up) dynamic programming.
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n, prices):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
21
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/vivit-b-16x2-kinetics400': (
        'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
346
0
'''simple docstring''' from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class A_ : def lowercase ( self : str , snake_case_ : int ): raise NotImplementedError() def lowercase ( self : Any ): raise NotImplementedError() class A_ ( lowerCAmelCase_ ): def __init__( self : str , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , **snake_case_ : Tuple ): _UpperCAmelCase = tokenizer _UpperCAmelCase = skip_prompt _UpperCAmelCase = decode_kwargs # variables used in the streaming process _UpperCAmelCase = [] _UpperCAmelCase = 0 _UpperCAmelCase = True def lowercase ( self : Tuple , snake_case_ : List[str] ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("TextStreamer only supports batch size 1" ) elif len(value.shape ) > 1: _UpperCAmelCase = value[0] if self.skip_prompt and self.next_tokens_are_prompt: _UpperCAmelCase = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) _UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("\n" ): _UpperCAmelCase = text[self.print_len :] _UpperCAmelCase = [] _UpperCAmelCase = 0 # If the last token is a CJK character, we print the characters. elif len(snake_case_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): _UpperCAmelCase = text[self.print_len :] self.print_len += len(snake_case_ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: _UpperCAmelCase = text[self.print_len : text.rfind(" " ) + 1] self.print_len += len(snake_case_ ) self.on_finalized_text(snake_case_ ) def lowercase ( self : Optional[int] ): # Flush the cache, if it exists if len(self.token_cache ) > 0: _UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) _UpperCAmelCase = text[self.print_len :] _UpperCAmelCase = [] _UpperCAmelCase = 0 else: _UpperCAmelCase = "" _UpperCAmelCase = True self.on_finalized_text(snake_case_ , stream_end=snake_case_ ) def lowercase ( self : Any , snake_case_ : str , snake_case_ : bool = False ): print(snake_case_ , flush=snake_case_ , end="" if not stream_end else None ) def lowercase ( self : Optional[Any] , snake_case_ : Tuple ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0X4e_00 and cp <= 0X9f_ff) or (cp >= 0X34_00 and cp <= 0X4d_bf) # or (cp >= 0X2_00_00 and cp <= 0X2_a6_df) # or (cp >= 0X2_a7_00 and cp <= 0X2_b7_3f) # or (cp >= 0X2_b7_40 and cp <= 0X2_b8_1f) # or (cp >= 0X2_b8_20 and cp <= 0X2_ce_af) # or (cp >= 0Xf9_00 and cp <= 0Xfa_ff) or (cp >= 0X2_f8_00 and cp <= 0X2_fa_1f) # ): # return True return False class A_ ( lowerCAmelCase_ ): def __init__( self : List[Any] , snake_case_ : "AutoTokenizer" , snake_case_ : bool = False , snake_case_ : Optional[float] = None , **snake_case_ : Union[str, Any] ): super().__init__(snake_case_ , snake_case_ , **snake_case_ ) _UpperCAmelCase = Queue() _UpperCAmelCase = None _UpperCAmelCase = timeout def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : bool = False ): self.text_queue.put(snake_case_ , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : str ): return self def lowercase ( self : Any ): _UpperCAmelCase = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
22
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
346
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor UpperCamelCase__: str = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> List[str]: if isinstance(_lowerCAmelCase , torch.Tensor ): return image elif isinstance(_lowerCAmelCase , PIL.Image.Image ): UpperCAmelCase : List[Any] = [image] UpperCAmelCase : str = [trans(img.convert('''RGB''' ) ) for img in image] UpperCAmelCase : List[Any] = torch.stack(_lowerCAmelCase ) return image class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : List[Any] ) -> List[Any]: super().__init__() # make sure scheduler can always be converted to DDIM UpperCAmelCase : Union[str, Any] = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=__snake_case , scheduler=__snake_case ) def A ( self : Tuple , __snake_case : Union[str, Any] ) -> str: if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" ) def A ( self : List[str] , __snake_case : List[str] , __snake_case : Any , __snake_case : List[Any] ) -> Optional[Any]: # get the original timestep using init_timestep UpperCAmelCase : Optional[int] = min(int(num_inference_steps * strength ) , __snake_case ) UpperCAmelCase : Tuple = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase : Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def A ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : str=None ) -> List[Any]: if not isinstance(__snake_case , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__snake_case )}""" ) UpperCAmelCase : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case ) if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase : Optional[int] = init_latents.shape UpperCAmelCase : Tuple = randn_tensor(__snake_case , generator=__snake_case , device=__snake_case , dtype=__snake_case ) # get latents print('''add noise to latents at timestep''' , __snake_case ) UpperCAmelCase : List[Any] = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case ) UpperCAmelCase : Any = init_latents return latents @torch.no_grad() def __call__( self : Union[str, Any] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] = None , __snake_case : float = 0.8 , __snake_case : int = 1 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : float = 0.0 , __snake_case : int = 50 , __snake_case : Optional[bool] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , ) -> Union[ImagePipelineOutput, Tuple]: self.check_inputs(__snake_case ) # 2. 
Preprocess image UpperCAmelCase : int = preprocess(__snake_case ) # 3. set timesteps self.scheduler.set_timesteps(__snake_case , device=self.device ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.get_timesteps(__snake_case , __snake_case , self.device ) UpperCAmelCase : List[Any] = timesteps[:1].repeat(__snake_case ) # 4. Prepare latent variables UpperCAmelCase : Union[str, Any] = self.prepare_latents(__snake_case , __snake_case , __snake_case , self.unet.dtype , self.device , __snake_case ) UpperCAmelCase : Dict = latents # 5. Denoising loop for t in self.progress_bar(__snake_case ): # 1. predict noise model_output UpperCAmelCase : List[Any] = self.unet(__snake_case , __snake_case ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 UpperCAmelCase : Any = self.scheduler.step( __snake_case , __snake_case , __snake_case , eta=__snake_case , use_clipped_model_output=__snake_case , generator=__snake_case , ).prev_sample UpperCAmelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : Dict = self.numpy_to_pil(__snake_case ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=__snake_case )
23
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ): """simple docstring""" UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase__ = 3 UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) UpperCAmelCase__ = jieba UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" if self.remove_space: UpperCAmelCase__ = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ = inputs UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase ) UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: UpperCAmelCase__ = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase ) UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) UpperCAmelCase__ = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ = cur_pieces[1:] else: UpperCAmelCase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ): """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , 
already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
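# Hypothetical usage sketch for the tokenizer defined above. Its class name is
# mangled in this dump, but the logic matches transformers' CpmTokenizer; the
# "TsinghuaAI/CPM-Generate" checkpoint comes from the vocab map above, and
# sentencepiece + jieba must be installed for it to load.
from transformers import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer("你好,世界")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids))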
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[str] = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'CLIPImageProcessor' A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self : int , a__ : int=None , a__ : Dict=None , **a__ : List[str] ): """simple docstring""" __snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) __snake_case = kwargs.pop('''feature_extractor''' ) __snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) def __call__(self : Any , a__ : Dict=None , a__ : List[str]=None , a__ : Dict=None , **a__ : Tuple ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __snake_case = self.tokenizer(a__ , return_tensors=a__ , **a__ ) if images is not None: __snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ ) if text is not None and images is not None: __snake_case = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def a (self : Union[str, Any] , *a__ : int , **a__ : List[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*a__ , **a__ ) def a (self : Any , *a__ : List[Any] , **a__ : List[str] ): """simple docstring""" return self.tokenizer.decode(*a__ , **a__ ) @property def a (self : int ): """simple docstring""" __snake_case = self.tokenizer.model_input_names __snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
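# Hypothetical usage sketch for the processor class above. Its name is mangled
# here, but pairing a CLIPImageProcessor with an XLM-R tokenizer matches
# transformers' AltCLIPProcessor; the "BAAI/AltCLIP" checkpoint and the COCO
# image URL are illustrative assumptions.
import requests
from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # typically input_ids, attention_mask, pixel_values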
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ = logging.getLogger(__name__) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) UpperCAmelCase__ = parser.parse_args() return args def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples["""text"""] ) return fn def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ = [] for i in range(len(tokenized_data["""input_ids"""] ) ): UpperCAmelCase__ = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = example.SerializeToString() records.append(SCREAMING_SNAKE_CASE__ ) return records def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit ) UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(SCREAMING_SNAKE_CASE__ : int ): # Concatenate all texts. UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()} UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase__ = { k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ): UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ ) with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file: for i in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase__ = serialized_examples[i] out_file.write(SCREAMING_SNAKE_CASE__ ) print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = parse_args() main(args)
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ : List[Any] = BlipImageProcessor() SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) SCREAMING_SNAKE_CASE__ : str = InstructBlipProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(self.tmpdirname ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).tokenizer def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).qformer_tokenizer def __magic_name__ (self ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] SCREAMING_SNAKE_CASE__ : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) SCREAMING_SNAKE_CASE__ : List[Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor.qformer_tokenizer , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE__ : str = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ ) 
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE__ : str = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : str = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE__ : str = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """lower newer""" SCREAMING_SNAKE_CASE__ : int = processor(text=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = qformer_tokenizer(SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] ) def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : int = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = """lower newer""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ : Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor() SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Any = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE__ : Union[str, Any] = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE__ : Optional[int] = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_image_processor() SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE__ : List[str] = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ ) 
SCREAMING_SNAKE_CASE__ : Optional[int] = """lower newer""" SCREAMING_SNAKE_CASE__ : Tuple = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ : Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCAmelCase_ = '\\n\n' UpperCAmelCase_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' UpperCAmelCase_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase__ = """cuda""" else: UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.to(_UpperCAmelCase ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase__ = model.config.max_length - 1 else: UpperCAmelCase__ = model.config.max_length UpperCAmelCase__ = tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase ) UpperCAmelCase__ = encodings["""input_ids"""] UpperCAmelCase__ = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase__ = [] UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ): UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) ) UpperCAmelCase__ = encoded_texts[start_index:end_index] UpperCAmelCase__ = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase ) UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_UpperCAmelCase ), attn_mask] , dim=1 ) UpperCAmelCase__ = encoded_batch with torch.no_grad(): UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits UpperCAmelCase__ = out_logits[..., :-1, :].contiguous() UpperCAmelCase__ = labels[..., 1:].contiguous() UpperCAmelCase__ = attn_mask[..., 1:].contiguous() UpperCAmelCase__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging _snake_case = logging.get_logger(__name__) class lowercase ( UpperCamelCase__ ): _a = ["audio_values", "audio_mask"] def __init__( self , _a=2048 , _a=1 , _a=[16, 16] , _a=128 , _a=4_4100 , _a=86 , _a=2048 , _a=0.0 , **_a , ) -> List[Any]: super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) _A : Any = spectrogram_length _A : Dict = num_channels _A : Optional[Any] = patch_size _A : str = feature_size // self.patch_size[1] _A : List[Any] = n_fft _A : Optional[Any] = sampling_rate // hop_length_to_sampling_rate _A : List[str] = sampling_rate _A : Union[str, Any] = padding_value _A : List[str] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def a__ ( self , _a ) -> np.ndarray: _A : Optional[int] = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) _A : Tuple = log_spec[:, :-1] _A : int = log_spec - 20.0 _A : Tuple = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) _A : int = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A : Optional[Any] = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): _A : str = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _A : Dict = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _A : Dict = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _A : Union[str, Any] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): _A : Optional[int] = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _A : Any = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _A : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _A : List[str] = np.array(_a ).astype(np.floataa ) # convert into correct format for padding _A : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _A : int = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _A : Optional[int] = padded_audio_features * self.padding_value for i in range(len(_a ) ): _A : Union[str, Any] = audio_features[i] _A : Optional[Any] = feature # return as BatchFeature if return_attention_mask: _A : Optional[Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: _A : Any = {"""audio_values""": padded_audio_features} _A : int = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(i) for 2 <= i <= limit using a sieve.

    phi(i) counts the positive integers below i that are coprime to i, so the
    sum equals the number of reduced proper fractions with denominator <= limit.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust phi for all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
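# Quick sanity check of the sieve above: with limit = 8 the totient sum is
# 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, i.e. the number of reduced proper fractions
# with denominator at most 8 (the Project Euler 72 example value).
assert solution(8) == 21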
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
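# Hypothetical usage sketch for the ControlNet pipeline exported above. Both
# Hub checkpoint names are assumptions, and the conditioning input is expected
# to be a canny edge map prepared separately.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
# image = pipe("a bird on a branch", image=canny_edge_map, num_inference_steps=20).images[0]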
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ): """simple docstring""" UpperCAmelCase__ = {} if top_k is not None: UpperCAmelCase__ = top_k return {}, {}, postprocess_params def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ): """simple docstring""" return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = load_image(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) elif self.framework == "tf": UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
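# Usage sketch for the image-classification pipeline implemented above, going
# through the public pipeline() factory; the ViT checkpoint and the COCO image
# URL are illustrative choices.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
print(preds)  # [{"score": ..., "label": ...}, ...]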
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCamelCase : List[Any] = { "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
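# Hypothetical usage sketch for the M2M100 classes exported above (note that
# the lazy-import module names in this dump are partially mangled). The
# "facebook/m2m100_418M" checkpoint and the English-to-French direction are
# illustrative assumptions.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))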
from math import factorial


def solution(n: int = 20) -> int:
    """Count the lattice paths through an n x n grid, i.e. the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
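# Worked check of the binomial identity above using only the standard library:
# a 2 x 2 grid has C(4, 2) = 6 routes and a 20 x 20 grid has C(40, 20) routes.
from math import comb

assert solution(2) == comb(4, 2) == 6
assert solution(20) == comb(40, 20) == 137846528820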
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING __UpperCAmelCase = logging.get_logger(__name__) @add_end_docstrings(_snake_case ) class lowerCamelCase (_snake_case ): '''simple docstring''' def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) requires_backends(self , 'decord' ) self.check_model_type(_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ) -> Dict: UpperCAmelCase_ : Optional[Any] = {} if frame_sampling_rate is not None: UpperCAmelCase_ : Optional[Any] = frame_sampling_rate if num_frames is not None: UpperCAmelCase_ : Optional[Any] = num_frames UpperCAmelCase_ : Optional[Any] = {} if top_k is not None: UpperCAmelCase_ : List[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , _UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]: return super().__call__(_UpperCamelCase , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=1 ) -> Optional[int]: if num_frames is None: UpperCAmelCase_ : Optional[Any] = self.model.config.num_frames if video.startswith('http://' ) or video.startswith('https://' ): UpperCAmelCase_ : List[str] = BytesIO(requests.get(_UpperCamelCase ).content ) UpperCAmelCase_ : Optional[Any] = VideoReader(_UpperCamelCase ) videoreader.seek(0 ) UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : Union[str, Any] = num_frames * frame_sampling_rate - 1 UpperCAmelCase_ : Optional[int] = np.linspace(_UpperCamelCase , _UpperCamelCase , num=_UpperCamelCase , dtype=np.intaa ) UpperCAmelCase_ : List[Any] = videoreader.get_batch(_UpperCamelCase ).asnumpy() UpperCAmelCase_ : Union[str, Any] = list(_UpperCamelCase ) UpperCAmelCase_ : List[Any] = self.image_processor(_UpperCamelCase , return_tensors=self.framework ) return model_inputs def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Dict = self.model(**_UpperCamelCase ) return model_outputs def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> List[str]: if top_k > self.model.config.num_labels: UpperCAmelCase_ : List[Any] = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ : Dict = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(_UpperCamelCase ) else: raise ValueError(f"Unsupported framework: {self.framework}" ) UpperCAmelCase_ : str = scores.tolist() UpperCAmelCase_ : Any = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
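# Hypothetical usage sketch for the video pipeline above; decord must be
# installed, the VideoMAE checkpoint name is an assumption, and a real video
# file path (or URL) has to be supplied before the commented call will run.
from transformers import pipeline

video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
# preds = video_classifier("path/to/clip.mp4", top_k=3, num_frames=16, frame_sampling_rate=4)
# print(preds)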
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : str = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class lowerCamelCase_ (snake_case__ , snake_case__ ): '''simple docstring''' __UpperCamelCase: Tuple = "resnet" __UpperCamelCase: Union[str, Any] = ["basic", "bottleneck"] def __init__( self : Tuple , A : Optional[int]=3 , A : Union[str, Any]=64 , A : Dict=[256, 512, 1024, 2048] , A : Tuple=[3, 4, 6, 3] , A : Optional[Any]="bottleneck" , A : int="relu" , A : List[Any]=False , A : Optional[int]=None , A : Union[str, Any]=None , **A : List[Any] , ): super().__init__(**A ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" ) _UpperCAmelCase : Tuple = num_channels _UpperCAmelCase : int = embedding_size _UpperCAmelCase : Union[str, Any] = hidden_sizes _UpperCAmelCase : int = depths _UpperCAmelCase : Any = layer_type _UpperCAmelCase : Optional[Any] = hidden_act _UpperCAmelCase : Union[str, Any] = downsample_in_first_stage _UpperCAmelCase : Dict = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(A ) + 1 )] _UpperCAmelCase , _UpperCAmelCase : str = get_aligned_output_features_output_indices( out_features=A , out_indices=A , stage_names=self.stage_names ) class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: List[str] = version.parse("1.11" ) @property def _A ( self : Dict ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def _A ( self : Dict ): return 1E-3
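# Usage sketch: the configuration class above drives ResNetModel and
# ResNetBackbone in transformers; a small ResNet-18-style layout is shown
# (torch required), with values chosen purely for illustration.
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(
    layer_type="basic",
    depths=[2, 2, 2, 2],
    hidden_sizes=[64, 128, 256, 512],
)
model = ResNetModel(config)
print(sum(p.numel() for p in model.parameters()))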
31
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
346
0
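A minimal usage sketch for the ResNet configuration defined in the row above, assuming the standard transformers AutoConfig API; the "microsoft/resnet-50" checkpoint name is taken from the pretrained-config map in that row, everything else is illustrative.

from transformers import AutoConfig

# Inspect the resnet-50 configuration referenced in the config map above.
config = AutoConfig.from_pretrained("microsoft/resnet-50")
print(config.layer_type, config.depths, config.hidden_sizes)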
import math import flax.linen as nn import jax.numpy as jnp def SCREAMING_SNAKE_CASE_ ( __A : jnp.ndarray , __A : int , __A : float = 1 , __A : float = 1 , __A : float = 1.0e4 , __A : bool = False , __A : float = 1.0 , ) -> jnp.ndarray: """simple docstring""" assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even""" a_ : int = float(embedding_dim // 2 ) a_ : str = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) a_ : Optional[int] = min_timescale * jnp.exp(jnp.arange(__A , dtype=jnp.floataa ) * -log_timescale_increment ) a_ : Optional[int] = jnp.expand_dims(__A , 1 ) * jnp.expand_dims(__A , 0 ) # scale embeddings a_ : str = scale * emb if flip_sin_to_cos: a_ : str = jnp.concatenate([jnp.cos(__A ), jnp.sin(__A )] , axis=1 ) else: a_ : Any = jnp.concatenate([jnp.sin(__A ), jnp.cos(__A )] , axis=1 ) a_ : Optional[int] = jnp.reshape(__A , [jnp.shape(__A )[0], embedding_dim] ) return signal class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : jnp.dtype = jnp.floataa @nn.compact def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.silu(SCREAMING_SNAKE_CASE__ ) a_ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(SCREAMING_SNAKE_CASE__ ) return temb class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : bool = False snake_case__ : float = 1 @nn.compact def __call__( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Tuple: return get_sinusoidal_embeddings( SCREAMING_SNAKE_CASE__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
32
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
346
0
"""simple docstring""" import math import sys import cva import numpy as np def lowercase ( __snake_case : np.ndarray , __snake_case : float ): # For applying gaussian function for each element in matrix. lowercase_ : Union[str, Any] = math.sqrt(__snake_case ) lowercase_ : Any = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def lowercase ( __snake_case : np.ndarray , __snake_case : int , __snake_case : int , __snake_case : int ): lowercase_ : List[str] = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def lowercase ( __snake_case : int , __snake_case : float ): # Creates a gaussian kernel of given dimension. lowercase_ : Tuple = np.zeros((kernel_size, kernel_size) ) for i in range(0 , __snake_case ): for j in range(0 , __snake_case ): lowercase_ : List[str] = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(__snake_case , __snake_case ) def lowercase ( __snake_case : np.ndarray , __snake_case : float , __snake_case : float , __snake_case : int , ): lowercase_ : Tuple = np.zeros(img.shape ) lowercase_ : Union[str, Any] = get_gauss_kernel(__snake_case , __snake_case ) lowercase_ , lowercase_ : List[Any] = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): lowercase_ : str = get_slice(__snake_case , __snake_case , __snake_case , __snake_case ) lowercase_ : Any = img_s - img_s[kernel_size // 2, kernel_size // 2] lowercase_ : List[Any] = vec_gaussian(__snake_case , __snake_case ) lowercase_ : List[Any] = np.multiply(__snake_case , __snake_case ) lowercase_ : Union[str, Any] = np.multiply(__snake_case , __snake_case ) lowercase_ : Any = np.sum(__snake_case ) / np.sum(__snake_case ) lowercase_ : Optional[Any] = val return imga def lowercase ( __snake_case : list ): lowercase_ : Optional[Any] = args[1] if args[1:] else '''../image_data/lena.jpg''' lowercase_ : Dict = float(args[2] ) if args[2:] else 1.0 lowercase_ : int = float(args[3] ) if args[3:] else 1.0 if args[4:]: lowercase_ : str = int(args[4] ) lowercase_ : Optional[int] = kernel_size + abs(kernel_size % 2 - 1 ) else: lowercase_ : Dict = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": __A , __A , __A , __A : List[str] = parse_args(sys.argv) __A : str = cva.imread(filename, 0) cva.imshow('''input image''', img) __A : str = img / 255 __A : Any = out.astype('''float32''') __A : List[Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) __A : Any = out * 255 __A : Optional[int] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
33
'''simple docstring''' from timeit import timeit UpperCAmelCase_ = { 'MALAYALAM': True, 'String': False, 'rotor': True, 'level': True, 'A': True, 'BB': True, 'ABC': False, 'amanaplanacanalpanama': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = 0 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) // 2 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE__ ) <= 2: return True if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return s == s[::-1] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())''' UpperCAmelCase__ = F'''from __main__ import test_data, {name}''' UpperCAmelCase__ = 500000 UpperCAmelCase__ = timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print('a man a plan a canal panama') # finished 500,000 runs in 0.46793 seconds benchmark_function('is_palindrome_slice') # finished 500,000 runs in 0.85234 seconds benchmark_function('is_palindrome') # finished 500,000 runs in 1.32028 seconds benchmark_function('is_palindrome_recursive') # finished 500,000 runs in 2.08679 seconds benchmark_function('is_palindrome_traversal')
346
0
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( __a ): __a : Optional[Any] = (DEISMultistepScheduler,) __a : Any = (("""num_inference_steps""", 25),) def A ( self : Any , **lowercase : Optional[int] ): '''simple docstring''' UpperCAmelCase = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**lowercase ) return config def A ( self : Union[str, Any] , lowercase : Optional[Any]=0 , **lowercase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = dict(self.forward_default_kwargs ) UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase ) UpperCAmelCase = self.dummy_sample UpperCAmelCase = 0.1 * sample UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase = self.get_scheduler_config(**lowercase ) UpperCAmelCase = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) UpperCAmelCase = scheduler_class.from_pretrained(lowercase ) new_scheduler.set_timesteps(lowercase ) # copy over dummy past residuals UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase , UpperCAmelCase = sample, sample for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ): UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : int ): '''simple docstring''' pass def A ( self : str , lowercase : Any=0 , **lowercase : Tuple ): '''simple docstring''' UpperCAmelCase = dict(self.forward_default_kwargs ) UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase ) UpperCAmelCase = self.dummy_sample UpperCAmelCase = 0.1 * sample UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**lowercase ) scheduler.set_timesteps(lowercase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase ) UpperCAmelCase = scheduler_class.from_pretrained(lowercase ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample UpperCAmelCase = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : Any , lowercase : List[str]=None , **lowercase : List[Any] ): '''simple docstring''' if scheduler is None: UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = 
self.get_scheduler_config(**lowercase ) UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(**lowercase ) UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = 10 UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = model(lowercase , lowercase ) UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample return sample def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = dict(self.forward_default_kwargs ) UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = self.dummy_sample UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase , '''set_timesteps''' ): scheduler.set_timesteps(lowercase ) elif num_inference_steps is not None and not hasattr(lowercase , '''set_timesteps''' ): UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] UpperCAmelCase = scheduler.timesteps[5] UpperCAmelCase = scheduler.timesteps[6] UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) UpperCAmelCase = self.full_loop(scheduler=lowercase ) UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase = self.full_loop(scheduler=lowercase ) UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def A ( self : Dict ): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=lowercase ) def A ( self : int ): '''simple docstring''' self.check_over_configs(thresholding=lowercase ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , algorithm_type='''deis''' , solver_order=lowercase , solver_type=lowercase , ) def A ( self : Optional[int] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase ) def A ( self : Tuple ): '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , ) UpperCAmelCase = self.full_loop( 
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , algorithm_type=lowercase , ) assert not torch.isnan(lowercase ).any(), "Samples have nan numbers" def A ( self : int ): '''simple docstring''' self.check_over_configs(lower_order_final=lowercase ) self.check_over_configs(lower_order_final=lowercase ) def A ( self : List[Any] ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=lowercase , time_step=0 ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.full_loop() UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) UpperCAmelCase = torch.mean(torch.abs(lowercase ) ) assert abs(result_mean.item() - 0.091 ) < 1E-3 def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 ) UpperCAmelCase = scheduler_class(**lowercase ) UpperCAmelCase = 10 UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase = model(lowercase , lowercase ) UpperCAmelCase = scheduler.step(lowercase , lowercase , lowercase ).prev_sample assert sample.dtype == torch.floataa
34
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
346
0
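Outside the test harness above, the same from_config round-trip is the usual way to swap a DEIS scheduler into a diffusers pipeline; a hedged sketch, with an example checkpoint name that is not part of this row.

from diffusers import DEISMultistepScheduler, DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # example checkpoint
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]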
'''simple docstring'''
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    # Stable softmax over the last axis.
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """Pipeline that classifies a pair of texts with a sequence-classification model."""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
35
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: `frames` holds the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
0
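A hedged sketch of how a custom pair-classification pipeline such as the one in the row above is typically registered and called; the registry call and the checkpoint name are assumptions rather than part of this row.

from transformers import AutoModelForSequenceClassification, pipeline
from transformers.pipelines import PIPELINE_REGISTRY

PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
)
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")  # example checkpoint
print(classifier("I almost forgot my keys.", second_text="I nearly left my keys behind."))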
from math import ceil


def solution(n: int = 1_001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1   # side length of the i-th ring
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even  # sum of the ring's four corners
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
36
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = False UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) UpperCAmelCase__ = TaConfig( vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , ) UpperCAmelCase__ = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): UpperCAmelCase__ = TaBlock(_UpperCAmelCase ) self.encoders.append(_UpperCAmelCase ) UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase ) UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase ) UpperCAmelCase__ = encoder_input_tokens.shape[1] UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(_UpperCAmelCase ) UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase ) # inverted the attention mask UpperCAmelCase__ = encoder_input_tokens.size() UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase ) for lyr in self.encoders: UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0] UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase ) return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
346
0
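A quick sanity check for the spiral-diagonal solution above, using the 5 by 5 example spiral from the problem statement, whose diagonals sum to 101.

assert solution(5) == 101
print(solution())  # diagonal sum of the default 1_001 by 1_001 spiral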
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase__ : Optional[Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase__ : int = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase__ : Dict = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase__ : Optional[Any] = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(UpperCamelCase )-1}""" ) if "norm" in key: lowerCAmelCase__ : str = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase__ : Any = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase__ : Tuple = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(UpperCamelCase )-1}""" ) if "layer_norm1" in key: lowerCAmelCase__ : Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase__ : str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase__ : List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase__ : List[str] = key.replace(f"""block{idx}""" , f"""block.{int(UpperCamelCase )-1}""" ) if "attn.q" in key: lowerCAmelCase__ : Optional[Any] = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCAmelCase__ : Union[str, Any] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase__ : Optional[Any] = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase__ : Dict = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase__ : Any = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase__ : Dict = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase__ : Any = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase__ : Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase__ : Any = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase__ : Optional[Any] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(UpperCamelCase )-1}""" ) if "bot_conv" in key: lowerCAmelCase__ : Optional[int] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase__ : Optional[int] = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase__ : Any = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase__ : List[str] = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase__ : List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase__ : List[Any] = key.replace("""fusion3""" , """3.fusion""" ) if 
"fusion" in key and "conv" in key: lowerCAmelCase__ : str = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase__ : Union[str, Any] = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase__ : List[Any] = value return new_state_dict def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase__ : Optional[int] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase__ : List[Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase__ : List[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase__ : Optional[Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase__ : List[str] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase__ : Union[str, Any] = kv_bias[config.hidden_sizes[i] :] def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase__ : str = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return image @torch.no_grad() def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=None ): """simple docstring""" lowerCAmelCase__ : Tuple = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowerCAmelCase__ : str = GLPNImageProcessor() # prepare image lowerCAmelCase__ : Dict = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase__ : List[Any] = torch.load(UpperCamelCase , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase__ : Any = rename_keys(UpperCamelCase ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase , UpperCamelCase ) # create HuggingFace model and load state dict lowerCAmelCase__ : List[Any] = GLPNForDepthEstimation(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # forward pass lowerCAmelCase__ : int = model(UpperCamelCase ) lowerCAmelCase__ : List[Any] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase__ : str = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase__ : Optional[int] = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase__ : Optional[int] = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase , atol=1e-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=UpperCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , 
organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=UpperCamelCase , ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) parser.add_argument( '''--model_name''', default='''glpn-kitti''', type=str, help='''Name of the model in case you\'re pushing to the hub.''', ) _lowerCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
37
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase__ = name.split(""".""" ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): '''simple docstring''' if config_path is not None: UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaConfig() if is_seq_class: UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = idalabel UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) 
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" ) UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
346
0
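The GLPN conversion entry point above can also be invoked directly from Python; a hedged sketch with placeholder paths, passing positional arguments in the same order as the argparse call at the bottom of that script.

convert_glpn_checkpoint(
    "/path/to/original_glpn_kitti.pth",  # checkpoint_path (placeholder)
    "./glpn-kitti-converted",            # pytorch_dump_folder_path (placeholder)
    False,                               # push_to_hub
    "glpn-kitti",                        # model_name
)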
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it is not matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
38
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: UpperCAmelCase__ = [] UpperCAmelCase__ = Counter() UpperCAmelCase__ = 0 UpperCAmelCase__ = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: UpperCAmelCase__ = candidate + """\n""" + test_case UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): UpperCAmelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCAmelCase__ , UpperCAmelCase__ = [], [] for result in results.values(): result.sort() UpperCAmelCase__ = [r[1]["""passed"""] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = k UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
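# A standalone sketch of the pass@k estimator used above: the unbiased estimator
# 1 - C(n - c, k) / C(n, k) from "Evaluating Large Language Models Trained on Code",
# written here for a single problem so the arithmetic is easy to follow. The numbers
# mirror the docstring example (2 candidates, 1 passing -> pass@1 = 0.5); the helper
# name is illustrative only.
import numpy as np


def single_problem_pass_at_k(n: int, c: int, k: int) -> float:
    # n: candidates generated, c: candidates that passed, k: evaluation budget
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))


print(single_problem_pass_at_k(n=2, c=1, k=1))  # 0.5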
346
0
import copy import re class __lowerCamelCase : """simple docstring""" UpperCamelCase__ = "hp" UpperCamelCase__ = {} UpperCamelCase__ = None @classmethod def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = prefix _UpperCAmelCase = defaults cls.build_naming_info() @staticmethod def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" if len(UpperCAmelCase ) == 0: return "" _UpperCAmelCase = None if any(char.isdigit() for char in word ): raise Exception(F"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(UpperCAmelCase ) + 1 ): _UpperCAmelCase = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _UpperCAmelCase = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(UpperCAmelCase ): _UpperCAmelCase = '' while integer != 0: _UpperCAmelCase = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s _UpperCAmelCase = 0 while True: _UpperCAmelCase = word + '#' + int_to_alphabetic(UpperCAmelCase ) if sword in info["reverse_short_word"]: continue else: _UpperCAmelCase = sword break _UpperCAmelCase = short_word _UpperCAmelCase = word return short_word @staticmethod def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = param_name.split('_' ) _UpperCAmelCase = [TrialShortNamer.shortname_for_word(UpperCAmelCase , UpperCAmelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _UpperCAmelCase = ['', '_'] for separator in separators: _UpperCAmelCase = separator.join(UpperCAmelCase ) if shortname not in info["reverse_short_param"]: _UpperCAmelCase = shortname _UpperCAmelCase = param_name return shortname return param_name @staticmethod def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = TrialShortNamer.shortname_for_key(UpperCAmelCase , UpperCAmelCase ) _UpperCAmelCase = short_name _UpperCAmelCase = param_name @classmethod def UpperCamelCase ( cls ): """simple docstring""" if cls.NAMING_INFO is not None: return _UpperCAmelCase = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } _UpperCAmelCase = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(UpperCAmelCase , UpperCAmelCase ) _UpperCAmelCase = info @classmethod def UpperCamelCase ( cls , UpperCAmelCase ): """simple docstring""" cls.build_naming_info() assert cls.PREFIX is not None _UpperCAmelCase = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _UpperCAmelCase = cls.NAMING_INFO['short_param'][k] if isinstance(UpperCAmelCase , UpperCAmelCase ): _UpperCAmelCase = 1 if v else 0 _UpperCAmelCase = '' if isinstance(UpperCAmelCase , (int, float) ) else '-' _UpperCAmelCase = F"""{key}{sep}{v}""" name.append(UpperCAmelCase ) return "_".join(UpperCAmelCase ) @classmethod def UpperCamelCase ( cls , UpperCAmelCase ): """simple docstring""" _UpperCAmelCase = repr[len(cls.PREFIX ) + 1 :] if repr == "": _UpperCAmelCase = [] else: _UpperCAmelCase = repr.split('_' ) _UpperCAmelCase = {} for value in values: if "-" in value: _UpperCAmelCase , _UpperCAmelCase = value.split('-' ) else: 
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
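# A usage sketch, assuming the class above is transformers' TrialShortNamer (the code
# itself refers to TrialShortNamer.shortname_for_word and to cls.PREFIX / cls.DEFAULTS /
# cls.NAMING_INFO, which matches that class). The subclass, its prefix and the parameter
# values are hypothetical; `shortname` and `parse_repr` are assumed to be the original
# names of the last two classmethods.
class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 5e-5, "warmup_steps": 0}


name = RunNamer.shortname({"learning_rate": 1e-4, "warmup_steps": 0})
# Values equal to their default are omitted from the name, so only the learning rate is
# encoded; parse_repr(name) recovers the full parameter dict, filling the defaults back in.
params = RunNamer.parse_repr(name)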
39
import math


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Scale 'value' by 'factor', then walk upward (or downward with desc=True) to a prime."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
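# A quick usage sketch for the two helpers above. Note the quirk in next_prime: if
# factor * value is already prime, the search restarts from value + 1, so the result
# is always strictly larger than the starting point.
print(is_prime(29))      # True
print(next_prime(6))     # 7   (6 is not prime; 7 is the next prime)
print(next_prime(7, 2))  # 17  (7 * 2 = 14; 14, 15, 16 are composite, 17 is prime)
print(next_prime(7))     # 11  (7 is already prime, so the search restarts from 8)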
346
0
def bfs(graph, s, t, parent):
    # Standard BFS on the residual graph; records the augmenting path in `parent`
    # and returns True if the sink `t` is reachable from the source `s`.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # Edmonds-Karp style max flow: repeatedly find an augmenting path with BFS
    # and push the bottleneck capacity along it.
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities of the edges and reverse edges
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
40
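# A hedged check for the Ford-Fulkerson snippet above: the 6-node capacity matrix is
# the classic textbook max-flow example, for which the print call is expected to output
# 23. A second, smaller matrix (illustrative only) makes the residual-graph updates easy
# to trace by hand; note that ford_fulkerson mutates the matrix it is given.
small_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
print(ford_fulkerson(small_graph, 0, 3))  # 4: flow 2 on 0->1->3 plus flow 2 on 0->2->3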
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times 'term' occurs in 'document' (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    """Return (number of documents in 'corpus' containing 'term', total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return log10(n / df) rounded to 3 places, optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
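# A short worked example for the four helpers above (the toy corpus and the numbers are
# illustrative only):
corpus = "this is the first document\nthis document is the second document"
tf = term_frequency("document", "this document is the second document")  # 2
df, n_docs = document_frequency("document", corpus)                      # (2, 2)
idf = inverse_document_frequency(1, 10)                                  # round(log10(10 / 1), 3) = 1.0
print(tf_idf(tf, idf))                                                   # 2.0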
346
0