Dataset schema (field, type, observed value range):

    code                      string   length 87 to 55.2k
    code_codestyle            int64    0 to 349
    style_context             string   length 135 to 49.1k
    style_context_codestyle   int64    0 to 349
    label                     int64    0 to 1
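Each row pairs a code sample (with its style-class id) against a style-context sample (with its style-class id) and a binary label. As a minimal sketch of loading and inspecting such a dataset with the `datasets` library, assuming the data is published under some identifier (the one below is a placeholder, not the real one):

from datasets import load_dataset

# Hypothetical identifier: substitute the actual dataset path or local data files.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
# Scalar fields: the style-class id of each side and the binary pair label.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# Text fields: the raw source of each sample.
print(row["code"][:200])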
---- Row 1 ----
code:

import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of states, actions and returns.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )

code_codestyle: 92
style_context:

import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)

style_context_codestyle: 346
label: 0
---- Row 2 ----
code:

from ..utils import DummyObject, requires_backends


# The anonymization in this sample collapsed the thirteen distinct flax dummy-object
# class names in the original module into identical copies, so the repeated
# placeholder pattern is kept once here under a neutral name.
class FlaxDummyObject(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

code_codestyle: 187
style_context:

from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass

style_context_codestyle: 346
label: 0
---- Row 3 ----
code:

import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # Each sequence is padded to length 94; the zero tails are written as
        # run-length expressions for readability, which evaluate to the same lists.
        expected_encoding = {
            "input_ids": [
                [39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455,
                 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672,
                 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035,
                 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358,
                 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123,
                 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114],
                [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013,
                 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114] + [0] * 63,
                [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114] + [0] * 84,
            ],
            "attention_mask": [
                [1] * 94,
                [1] * 31 + [0] * 63,
                [1] * 10 + [0] * 84,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )

code_codestyle: 104
style_context:

from timeit import timeit


test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")

style_context_codestyle: 346
label: 0
---- Row 4 ----
code:

import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

code_codestyle: 251
style_context:

import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
    author = "Lin, Chin-Yew and
      Och, Franz Josef",
    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
    month = "aug 23{--}aug 27",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://www.aclweb.org/anthology/C04-1072",
    pages = "501--507",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }

style_context_codestyle: 346
label: 0
"""simple docstring""" from __future__ import annotations import math def _lowercase ( __snake_case ) -> str: if num <= 0: __lowerCAmelCase : Optional[int] = F"""{num}: Invalid input, please enter a positive integer.""" raise ValueError(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase : Dict = [True] * (num + 1) __lowerCAmelCase : int = [] __lowerCAmelCase : Optional[Any] = 2 __lowerCAmelCase : List[str] = int(math.sqrt(SCREAMING_SNAKE_CASE__ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(SCREAMING_SNAKE_CASE__ ) # Set multiples of start be False for i in range(start * start ,num + 1 ,SCREAMING_SNAKE_CASE__ ): if sieve[i] is True: __lowerCAmelCase : int = False start += 1 for j in range(end + 1 ,num + 1 ): if sieve[j] is True: prime.append(SCREAMING_SNAKE_CASE__ ) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
269
style_context:

from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline

style_context_codestyle: 346
label: 0
---- Row 6 ----
code:

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)

code_codestyle: 337
style_context:

import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask

style_context_codestyle: 346
label: 0
---- Row 7 ----
code:

def exchange_sort(numbers: list[int]) -> list[int]:
    """
    Uses exchange sort to sort a list of numbers in place.
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))

code_codestyle: 214
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
            )
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
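if __name__ == "__main__":
    # Usage sketch (a minimal illustration, not part of the original script; it
    # assumes the fine-tuned CTC conversion above just ran): the dump folder is
    # a regular transformers checkpoint afterwards.
    from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

    processor = Wav2Vec2Processor.from_pretrained(args.pytorch_dump_folder_path)
    hf_model = Wav2Vec2ForCTC.from_pretrained(args.pytorch_dump_folder_path)
    print(type(processor).__name__, type(hf_model).__name__)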
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    """
    Greedy APX-Algorithm for MVC
    @input: graph (graph stored in an adjacency list where each vertex
            is represented with an integer)
    @example:
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    #   (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
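# Sanity-check sketch for the greedy cover above: a vertex set is a valid cover
# when every listed edge has at least one endpoint in the set. `is_vertex_cover`
# is an illustrative helper, not part of the original module.
def is_vertex_cover(graph: dict, cover: set) -> bool:
    return all(u in cover or v in cover for u, neighbors in graph.items() for v in neighbors)


if __name__ == "__main__":
    demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(is_vertex_cover(demo_graph, greedy_min_vertex_cover(demo_graph)))  # True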
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
      title={Evaluating Large Language Models Trained on Code},
      author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
      year={2021},
      eprint={2107.03374},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""


_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidates should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout:
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""


_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
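if __name__ == "__main__":
    # Worked example for the unbiased estimator above (illustrative): with n=2
    # candidates of which c=1 passes, pass@1 = 1 - C(n-c, 1)/C(n, 1) = 1 - 1/2 = 0.5,
    # matching the docstring example where one of two candidates is correct.
    print(estimate_pass_at_k(np.array([2]), np.array([1]), 1))  # [0.5]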
"""simple docstring""" import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=[1, 16, 4, 4] , _UpperCAmelCase=None , ): lowercase__: List[str] = parent lowercase__: Union[str, Any] = batch_size lowercase__: Optional[int] = image_size lowercase__: Dict = patch_size lowercase__: List[str] = num_channels lowercase__: List[str] = is_training lowercase__: str = use_labels lowercase__: List[Any] = hidden_size lowercase__: Optional[Any] = num_hidden_layers lowercase__: int = num_attention_heads lowercase__: List[Any] = intermediate_size lowercase__: List[str] = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: str = attention_probs_dropout_prob lowercase__: Dict = type_sequence_label_size lowercase__: Union[str, Any] = initializer_range lowercase__: int = scope lowercase__: Optional[int] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size lowercase__: List[str] = (self.image_size // 32) ** 2 lowercase__: Tuple = num_patches + 1 def _snake_case ( self ): lowercase__: str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__: str = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__: Optional[Any] = self.get_config() return config, pixel_values, labels def _snake_case ( self ): lowercase__: Union[str, Any] = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , 
backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCAmelCase , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Optional[int] = ViTHybridModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: int = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: str = self.type_sequence_label_size lowercase__: Optional[int] = ViTHybridForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() lowercase__: Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self ): lowercase__: List[Any] = self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__: List[str] = config_and_inputs lowercase__: Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase (lowerCamelCase_ ,lowerCamelCase_ ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () _UpperCAmelCase :Optional[int] = ( {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :Any = False _UpperCAmelCase :Optional[int] = False def _snake_case ( self ): lowercase__: str = ViTHybridModelTester(self ) lowercase__: int = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _snake_case ( self ): pass def _snake_case ( self ): lowercase__, lowercase__: Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: List[str] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__: List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def _snake_case ( self ): lowercase__, lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: Optional[Any] = model_class(_UpperCAmelCase ) lowercase__: str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__: Tuple = [*signature.parameters.keys()] lowercase__: int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) def _snake_case ( self ): lowercase__, lowercase__: str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__: List[str] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: lowercase__: List[str] = model_class(config=_UpperCAmelCase ) # Skip the check for the backbone for name, module in model.named_modules(): if 
module.__class__.__name__ == "ViTHybridPatchEmbeddings": lowercase__: Dict = [F"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def _snake_case ( self ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: Union[str, Any] = ViTHybridModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: lowercase__: Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @cached_property def _snake_case ( self ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _snake_case ( self ): lowercase__: str = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _UpperCAmelCase ) lowercase__: Union[str, Any] = self.default_image_processor lowercase__: List[str] = prepare_img() lowercase__: Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): lowercase__: Union[str, Any] = model(**_UpperCAmelCase ) # verify the logits lowercase__: str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) lowercase__: Optional[int] = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) @slow @require_accelerate def _snake_case ( self ): lowercase__: int = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) lowercase__: List[str] = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' ) lowercase__: Tuple = prepare_img() lowercase__: List[Any] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ) lowercase__: Optional[int] = model(**_UpperCAmelCase ) lowercase__: List[str] = outputs.logits # model predicts one of the 1000 ImageNet classes lowercase__: List[Any] = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
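if __name__ == "__main__":
    # Quick-start sketch (illustrative; assumes access to the Hugging Face Hub):
    # classify the same COCO fixture image the integration tests use, via the
    # high-level pipeline API.
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-hybrid-base-bit-384")
    print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])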
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
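if __name__ == "__main__":
    # Small worked example (illustrative): 14, 15 and 16 are composite, so the
    # upward search in next_prime stops at 17.
    print(is_prime(17))    # True
    print(next_prime(14))  # 17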
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model: ",
)

parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters

parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)

parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)

parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)

parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")

parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)

parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")

parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)

args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


# Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
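# Inspection sketch (illustrative; assumes the engine file serialized above
# exists): deserializing the engine and dumping its binding names and shapes is
# a quick way to confirm the optimization profile matches INPUT_SHAPE.
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
    inspect_engine = runtime.deserialize_cuda_engine(f.read())
    for binding_idx in range(inspect_engine.num_bindings):
        print(inspect_engine.get_binding_name(binding_idx), inspect_engine.get_binding_shape(binding_idx))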
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    # Return the number of times `term` appears in `document` (case-insensitive).
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    # Return (number of documents containing `term`, total number of documents),
    # where documents are newline-separated entries of `corpus`.
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
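if __name__ == "__main__":
    # Worked example (illustrative): "to" occurs twice in the six-word document;
    # with df=1 matching document out of n=2, idf = round(log10(2/1), 3) = 0.301,
    # so tf-idf = round(2 * 0.301, 3) = 0.602.
    tf = term_frequency("to", "To be, or not to be")
    df, n = document_frequency("to", "To be, or not to be\nNow is the winter")
    idf = inverse_document_frequency(df, n)
    print(tf, idf, tf_idf(tf, idf))  # 2 0.301 0.602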
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
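if __name__ == "__main__":
    # Minimal generation sketch (illustrative; assumes a CUDA device and Hub
    # access, with values mirroring the integration test above).
    pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
    pipe.to("cuda")
    result = pipe.text_to_image(
        "A painting of a squirrel eating a burger", num_inference_steps=50, guidance_scale=7.5
    )
    result.images[0].save("squirrel.png")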
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
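if __name__ == "__main__":
    # Loading sketch (illustrative): the compressed checkpoint built above follows
    # the DistilBERT parameter layout, so a student model can ingest it with
    # strict=False (weights that were intentionally not transferred stay
    # randomly initialized).
    from transformers import DistilBertForMaskedLM

    student = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
    load_result = student.load_state_dict(compressed_sd, strict=False)
    print(f"missing: {len(load_result.missing_keys)}, unexpected: {len(load_result.unexpected_keys)}")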
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
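def run_exported_model(onnx_file_path, model_name="facebook/bart-base", num_beams=4, max_length=5):
    # Re-use sketch (illustrative, not part of the original script): run a
    # previously exported graph with the same feed names used in
    # `export_and_validate_model`; `config_class` here is an assumption about
    # fetching the config without reloading the full torch weights.
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    config = model_dict[model_name].config_class.from_pretrained(model_name)
    ort_sess = onnxruntime.InferenceSession(onnx_file_path)
    inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="np")
    ort_out = ort_sess.run(
        None,
        {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "num_beams": np.array(num_beams),
            "max_length": np.array(max_length),
            "decoder_start_token_id": np.array(config.decoder_start_token_id),
        },
    )
    return tokenizer.batch_decode(ort_out[0], skip_special_tokens=True)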
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
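# A standalone usage sketch, an illustrative addition rather than part of the
# test file, showing the PRK -> PLMS stepping pattern the tests above exercise.
# A real pipeline would replace the 0.1 * sample stand-in with a UNet's noise
# prediction.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up steps
    residual = 0.1 * sample
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:  # linear multistep steps
    residual = 0.1 * sample
    sample = scheduler.step_plms(residual, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])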
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count reversible numbers of the given length by filling digit pairs
    from the outside in, tracking the carry parity in `remainder`."""
    if remaining_length == 0:
        # Leading zeroes are not allowed on either the number or its reverse.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        # The middle digit is added to itself, so any digit works as long as
        # the running remainder stays odd.
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digit must have the opposite parity so the digit sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Project Euler 145: count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
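# A brute-force cross-check, added here for illustration: test every candidate
# below 10**3 directly and compare with the counting solution above. The
# Project Euler 145 statement gives 120 reversible numbers below one thousand.
def is_reversible(n: int) -> bool:
    if n % 10 == 0:  # reversing would create a leading zero
        return False
    total = n + int(str(n)[::-1])
    return all(int(d) % 2 == 1 for d in str(total))


assert sum(is_reversible(n) for n in range(1, 1000)) == solution(3) == 120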
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    """Configuration class for the ViViT video transformer."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
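# A usage sketch, under the assumption that this file is consumed through the
# released transformers package (where it is exposed as transformers.VivitConfig).
# A randomly initialized model is built from a deliberately small config.
from transformers import VivitConfig, VivitModel

config = VivitConfig(
    num_frames=8,
    hidden_size=384,
    num_hidden_layers=4,
    num_attention_heads=6,
    intermediate_size=1536,
)
model = VivitModel(config)
print(model.config.tubelet_size)  # [2, 16, 16]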
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` into a (cipher, key) pair using random pads."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt(): (i + k) * k - k**2 == i * k, so divide by k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
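# A round-trip property check, added for illustration: decrypt must invert
# encrypt for arbitrary text, independent of the random pads chosen.
for text in ["Hello", "pad me", "1234!?"]:
    cipher_, key_ = Onepad.encrypt(text)
    assert Onepad.decrypt(cipher_, key_) == text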
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
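# A sketch of what the shim guarantees, assuming the released transformers
# package: constructing the old class still works but emits a FutureWarning
# pointing at DeiTImageProcessor.
import warnings

from transformers import DeiTFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = DeiTFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)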
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Two-pointer scan from both ends toward the middle."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Compare s[i] with s[n - i - 1] for the first half of the string."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Strip matching outer characters recursively."""
    if len(s) <= 1:  # a 0- or 1-character string is trivially a palindrome
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Compare the string against its reverse slice."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
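# A randomized agreement check, added for illustration: all four
# implementations must return the same answer on arbitrary strings.
import random
import string

random.seed(42)
for _ in range(100):
    s = "".join(random.choices(string.ascii_lowercase, k=random.randint(0, 8)))
    if random.random() < 0.5:
        s += s[::-1]  # force a palindrome half the time
    expected = s == s[::-1]
    assert (
        is_palindrome(s)
        == is_palindrome_traversal(s)
        == is_palindrome_recursive(s)
        == is_palindrome_slice(s)
        == expected
    )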
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ): """simple docstring""" UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase__ = 3 UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) UpperCAmelCase__ = jieba UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" if self.remove_space: UpperCAmelCase__ = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ = inputs UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase ) UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: UpperCAmelCase__ = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase ) UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) UpperCAmelCase__ = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ = cur_pieces[1:] else: UpperCAmelCase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ): """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , 
already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
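# A usage sketch for the tokenizer above, assuming the released transformers
# package plus the jieba and sentencepiece extras it requires; the checkpoint
# name is the one referenced in the file's vocab map.
from transformers import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("你好,世界!")
print(ids)
print(tokenizer.decode(ids))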
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __snake_case : Optional[Any] = logging.get_logger(__name__) __snake_case : Optional[int] = {'vocab_file': 'spiece.model'} __snake_case : Tuple = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class A__ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: Dict="<s>" , _SCREAMING_SNAKE_CASE: int="</s>" , _SCREAMING_SNAKE_CASE: Dict="<unk>" , _SCREAMING_SNAKE_CASE: Tuple="<sep>" , _SCREAMING_SNAKE_CASE: List[Any]="<pad>" , _SCREAMING_SNAKE_CASE: int="<cls>" , _SCREAMING_SNAKE_CASE: Union[str, Any]="<mask>" , _SCREAMING_SNAKE_CASE: List[str]=["<eop>", "<eod>"] , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: int , ) -> List[str]: """simple docstring""" __lowerCAmelCase : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token __lowerCAmelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) __lowerCAmelCase : List[str] = 3 __lowerCAmelCase : Dict = do_lower_case __lowerCAmelCase : List[Any] = remove_space __lowerCAmelCase : List[Any] = keep_accents __lowerCAmelCase : List[str] = vocab_file __lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_UpperCAmelCase) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
" "See https://pypi.org/project/jieba/ for installation.") __lowerCAmelCase : Optional[Any] = jieba __lowerCAmelCase : Any = str.maketrans(" \n" , "\u2582\u2583") @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> int: """simple docstring""" return len(self.sp_model) def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Tuple = {self.convert_ids_to_tokens(_UpperCAmelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self: Dict) -> int: """simple docstring""" __lowerCAmelCase : Any = self.__dict__.copy() __lowerCAmelCase : int = None return state def __setstate__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __lowerCAmelCase : str = {} __lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Optional[Any]) -> List[Any]: """simple docstring""" if self.remove_space: __lowerCAmelCase : Any = " ".join(inputs.strip().split()) else: __lowerCAmelCase : List[str] = inputs __lowerCAmelCase : Tuple = outputs.replace("``" , "\"").replace("''" , "\"") if not self.keep_accents: __lowerCAmelCase : Any = unicodedata.normalize("NFKD" , _UpperCAmelCase) __lowerCAmelCase : Tuple = "".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase)]) if self.do_lower_case: __lowerCAmelCase : str = outputs.lower() return outputs def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> int: """simple docstring""" __lowerCAmelCase : Tuple = self.preprocess_text(_UpperCAmelCase) __lowerCAmelCase : Any = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase) __lowerCAmelCase : Tuple = [] for piece in pieces: if len(_UpperCAmelCase) > 1 and piece[-1] == str(",") and piece[-2].isdigit(): __lowerCAmelCase : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , "")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: __lowerCAmelCase : Dict = cur_pieces[1:] else: __lowerCAmelCase : str = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(_UpperCAmelCase) else: new_pieces.append(_UpperCAmelCase) return new_pieces def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Dict: """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Any) -> List[Any]: """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Dict) -> List[Any]: """simple docstring""" __lowerCAmelCase : Any = "".join(_UpperCAmelCase).replace(_UpperCAmelCase , " ").strip() return out_string def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None) -> str: """simple docstring""" __lowerCAmelCase : Union[str, Any] = [self.sep_token_id] __lowerCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[int] , 
_SCREAMING_SNAKE_CASE: Optional[List[int]] = None , _SCREAMING_SNAKE_CASE: bool = False) -> str: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase)) + [1] + ([0] * len(_UpperCAmelCase)) + [1, 1] return ([0] * len(_UpperCAmelCase)) + [1, 1] def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None) -> List[Any]: """simple docstring""" __lowerCAmelCase : List[str] = [self.sep_token_id] __lowerCAmelCase : Optional[int] = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None) -> Tuple: """simple docstring""" if not os.path.isdir(_UpperCAmelCase): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return __lowerCAmelCase : Dict = os.path.join( _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _UpperCAmelCase) elif not os.path.isfile(self.vocab_file): with open(_UpperCAmelCase , "wb") as fi: __lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase) return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self: Tuple , *_SCREAMING_SNAKE_CASE: Tuple , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> Dict: """simple docstring""" __lowerCAmelCase : List[Any] = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase) __lowerCAmelCase : Optional[int] = text.replace(" " , "").replace("\u2582" , " ").replace("\u2583" , "\n") return text
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ = logging.getLogger(__name__) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) UpperCAmelCase__ = parser.parse_args() return args def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples["""text"""] ) return fn def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ = [] for i in range(len(tokenized_data["""input_ids"""] ) ): UpperCAmelCase__ = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = example.SerializeToString() records.append(SCREAMING_SNAKE_CASE__ ) return records def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit ) UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(SCREAMING_SNAKE_CASE__ : int ): # Concatenate all texts. UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()} UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase__ = { k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ): UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ ) with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file: for i in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase__ = serialized_examples[i] out_file.write(SCREAMING_SNAKE_CASE__ ) print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = parse_args() main(args)
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __a = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __SCREAMING_SNAKE_CASE : A : int = PegasusConfig A : Dict = {} A : Optional[int] = """gelu""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , ): lowercase : Optional[Any] = parent lowercase : List[Any] = batch_size lowercase : str = seq_length lowercase : List[str] = is_training lowercase : List[Any] = use_labels lowercase : List[Any] = vocab_size lowercase : str = hidden_size lowercase : Union[str, Any] = num_hidden_layers lowercase : List[str] = num_attention_heads lowercase : Optional[int] = intermediate_size lowercase : List[str] = hidden_dropout_prob lowercase : int = attention_probs_dropout_prob lowercase : Dict = max_position_embeddings lowercase : List[Any] = eos_token_id lowercase : Dict = pad_token_id lowercase : Any = bos_token_id def __lowerCamelCase ( self ): lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) lowercase : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) lowercase : List[Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase : Union[str, Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = 20 lowercase : Union[str, Any] = model_class_name(_UpperCAmelCase ) lowercase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] ) lowercase , lowercase : Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowercase : List[str] = 
model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) lowercase : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowercase : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase : Optional[int] = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) lowercase : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowercase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , ) lowercase : int = model.decode(_UpperCAmelCase , _UpperCAmelCase ) lowercase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = 20 lowercase : Optional[Any] = model_class_name(_UpperCAmelCase ) lowercase : Dict = model.encode(inputs_dict['''input_ids'''] ) lowercase , lowercase : List[Any] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowercase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowercase : str = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) lowercase : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) lowercase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowercase : int = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) lowercase : Optional[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase ) lowercase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=None, _UpperCamelCase=None, ) ->List[Any]: """simple docstring""" if attention_mask is None: lowercase : List[Any] = np.not_equal(SCREAMING_SNAKE_CASE__, config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: lowercase : Optional[int] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ), ], axis=-1, ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , unittest.TestCase ): A : Any = ( ( FlaxPegasusForConditionalGeneration, 
FlaxPegasusModel, ) if is_flax_available() else () ) A : Union[str, Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () A : List[str] = True A : Dict = False A : Optional[Any] = False A : Dict = False def __lowerCamelCase ( self ): lowercase : List[Any] = FlaxPegasusModelTester(self ) lowercase : List[Any] = ConfigTester(self , config_class=_UpperCAmelCase ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): lowercase , lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def __lowerCamelCase ( self ): lowercase , lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def __lowerCamelCase ( self ): lowercase , lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) lowercase : int = model_class(_UpperCAmelCase ) @jax.jit def encode_jitted(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ): return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) with self.subTest('''JIT Enabled''' ): lowercase : Optional[Any] = encode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase : int = encode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __lowerCamelCase ( self ): lowercase , lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase : str = model_class(_UpperCAmelCase ) lowercase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowercase : List[str] = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return model.decode( decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , ) with self.subTest('''JIT Enabled''' ): lowercase : Dict = decode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase : List[Any] = decode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCamelCase ( self ): for model_class_name in self.all_model_classes: lowercase : List[Any] = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=_UpperCAmelCase ) lowercase : Tuple = np.ones((1, 1) ) lowercase : str = model(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @slow def __lowerCamelCase ( self ): lowercase : 
Optional[int] = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' ) lowercase : Tuple = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' ) lowercase : List[str] = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ''', ] lowercase : Optional[Any] = [ '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''', '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''', ] lowercase : int = tokenizer(_UpperCAmelCase , return_tensors='''np''' , truncation=_UpperCAmelCase , max_length=512 , padding=_UpperCAmelCase ) lowercase : Optional[int] = model.generate(**_UpperCAmelCase , num_beams=2 ).sequences lowercase : str = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) assert tgt_text == decoded
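# A condensed generation sketch distilled from the slow test above; the only
# assumption is that the google/pegasus-xsum checkpoint (the one the test
# itself uses) can be downloaded.
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

inputs = tokenizer(
    ["PG&E stated it scheduled the blackouts in response to forecasts for high winds."],
    return_tensors="np",
    truncation=True,
    max_length=512,
    padding=True,
)
sequences = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))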
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCAmelCase_ = '\\n\n' UpperCAmelCase_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' UpperCAmelCase_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase__ = """cuda""" else: UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.to(_UpperCAmelCase ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase__ = model.config.max_length - 1 else: UpperCAmelCase__ = model.config.max_length UpperCAmelCase__ = tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase ) UpperCAmelCase__ = encodings["""input_ids"""] UpperCAmelCase__ = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase__ = [] UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ): UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) ) UpperCAmelCase__ = encoded_texts[start_index:end_index] UpperCAmelCase__ = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase ) UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_UpperCAmelCase ), attn_mask] , dim=1 ) UpperCAmelCase__ = encoded_batch with torch.no_grad(): UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits UpperCAmelCase__ = out_logits[..., :-1, :].contiguous() UpperCAmelCase__ = labels[..., 1:].contiguous() UpperCAmelCase__ = attn_mask[..., 1:].contiguous() UpperCAmelCase__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: count reduced proper fractions with denominator
    <= limit, i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit."""
    # phi[i] starts at i - 1 and is corrected with a sieve over the primes.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so its initial value was untouched
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
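# Sanity check from the Project Euler 72 statement: for denominators up to 8
# there are exactly 21 reduced proper fractions.
assert solution(8) == 21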
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit that form a hollow
    square lamina in between 1 and n_limit distinct ways.

    Each lamina uses outer_width**2 - hole_width**2 tiles, where the hole has
    the same parity as the outer square and is at least one tile wide.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Match the parity of the outer square.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
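# A brute-force cross-check, added for illustration, for a small tile budget:
# enumerate every outer/hole pair directly and compare the two counts.
from collections import Counter


def brute_force(t_limit: int) -> int:
    ways: Counter = Counter()
    for outer in range(3, t_limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # same parity as the outer square
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tiles only grows as the hole shrinks
            ways[tiles] += 1
    return sum(1 for n in ways.values() if 1 <= n <= 10)


assert brute_force(10_000) == solution(10_000)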
9
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ): """simple docstring""" UpperCAmelCase__ = {} if top_k is not None: UpperCAmelCase__ = top_k return {}, {}, postprocess_params def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ): """simple docstring""" return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = load_image(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) elif self.framework == "tf": UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
346
0
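# Hedged brute-force cross-check for the lamina counter above: enumerate
# (outer, hole) pairs directly for a tiny tile budget. The helper name and the
# spot-check value are illustrative assumptions, not part of the original.
from collections import defaultdict


def lamina_counts_brute(t_limit: int = 100) -> dict:
    count: dict = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # same parity, shrinking hole
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tile counts only grow as the hole shrinks
            count[tiles] += 1
    return count


# a 6x6 square with a 2x2 hole uses 32 tiles, so 32 must be countable
assert lamina_counts_brute(48)[32] >= 1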
"""simple docstring""" from manim import * class UpperCAmelCase (lowerCamelCase_ ): """simple docstring""" def _snake_case ( self ): lowercase__: Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) lowercase__: List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowercase__: Optional[Any] = [mem.copy() for i in range(6 )] lowercase__: List[str] = [mem.copy() for i in range(6 )] lowercase__: Tuple = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) lowercase__: Optional[int] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) lowercase__: str = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) lowercase__: Any = Text('''CPU''' , font_size=24 ) lowercase__: Optional[int] = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) lowercase__: str = [mem.copy() for i in range(4 )] lowercase__: str = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) lowercase__: List[Any] = Text('''GPU''' , font_size=24 ) lowercase__: Any = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) lowercase__: str = [mem.copy() for i in range(6 )] lowercase__: Any = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) lowercase__: int = Text('''Model''' , font_size=24 ) lowercase__: str = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) lowercase__: Dict = [] for i, rect in enumerate(_UpperCAmelCase ): rect.set_stroke(_UpperCAmelCase ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) lowercase__: List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=_UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=_UpperCAmelCase , buff=0.0 ) self.add(_UpperCAmelCase ) cpu_targs.append(_UpperCAmelCase ) lowercase__: Union[str, Any] = [mem.copy() for i in range(6 )] lowercase__: Tuple = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) lowercase__: Any = Text('''Loaded Checkpoint''' , font_size=24 ) lowercase__: Optional[int] = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , aligned_edge=_UpperCAmelCase , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) lowercase__: Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowercase__: Optional[int] = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = MarkupText( F"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) lowercase__: List[str] = MarkupText( F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) , Write(_UpperCAmelCase ) ) 
self.play(Write(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) ) lowercase__: Any = [] lowercase__: str = [] for i, rect in enumerate(_UpperCAmelCase ): lowercase__: Union[str, Any] = fill.copy().set_fill(_UpperCAmelCase , opacity=0.7 ) target.move_to(_UpperCAmelCase ) first_animations.append(GrowFromCenter(_UpperCAmelCase , run_time=1 ) ) lowercase__: int = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(_UpperCAmelCase , run_time=1.5 ) ) self.play(*_UpperCAmelCase ) self.play(*_UpperCAmelCase ) self.wait()
177
from math import factorial


def solution(n: int = 20) -> int:
    """Project Euler 15: count the lattice paths through an n x n grid.

    The answer is the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
346
0
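# Hedged cross-check for the central binomial coefficient above: math.comb
# computes C(2n, n) directly, and C(40, 20) is the well-known Project Euler 15
# value for the 20x20 grid.
from math import comb

assert comb(8, 4) == 70  # n = 4
assert comb(40, 20) == 137846528820  # n = 20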
import math


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string, e.g. 65 -> "0o101"."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
198
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
346
0
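# Hedged oracle test for the converter above: the stdlib oct() builtin emits
# the same "0o..." strings, so it can validate decimal_to_octal (the cleaned
# name of the function defined above) on the values its demo prints.
for value in (2, 8, 65, 216, 512):
    assert decimal_to_octal(value) == oct(value)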
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: return (max_value, per-item fractions taken)."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)  # best value/weight first

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
189
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
346
0
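# Hedged usage example for the fractional knapsack above, on the classic
# textbook instance (items worth 60/100/120 at weights 10/20/30, capacity 50):
# the greedy optimum takes the first two items whole and 2/3 of the third.
value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, capacity=50)
assert max_value == 240
assert fractions == [1, 1, 2 / 3]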
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( SCREAMING_SNAKE_CASE_ : str ): return x + 2 class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "x = 3" __lowerCAmelCase = {} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) assert result == 3 self.assertDictEqual(_UpperCAmelCase , {"x": 3} ) __lowerCAmelCase = "x = y" __lowerCAmelCase = {"y": 5} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCAmelCase , {"x": 5, "y": 5} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "y = add_two(x)" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {"add_two": add_two} , state=_UpperCAmelCase ) assert result == 5 self.assertDictEqual(_UpperCAmelCase , {"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) assert result is None assert "tried to execute add_two" in out.out def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "x = 3" __lowerCAmelCase = {} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) assert result == 3 self.assertDictEqual(_UpperCAmelCase , {"x": 3} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "test_dict = {'x': x, 'y': add_two(x)}" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {"add_two": add_two} , state=_UpperCAmelCase ) self.assertDictEqual(_UpperCAmelCase , {"x": 3, "y": 5} ) self.assertDictEqual(_UpperCAmelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "x = 3\ny = 5" __lowerCAmelCase = {} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCAmelCase , {"x": 3, "y": 5} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "text = f'This is x: {x}.'" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCAmelCase , {"x": 3, "text": "This is x: 3."} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "if x <= 3:\n y = 2\nelse:\n y = 5" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCAmelCase , {"x": 3, "y": 2} ) __lowerCAmelCase = {"x": 8} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCAmelCase , {"x": 8, "y": 5} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "test_list = [x, add_two(x)]" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {"add_two": add_two} , state=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [3, 5] ) self.assertDictEqual(_UpperCAmelCase , {"x": 3, "test_list": [3, 5]} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "y = x" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase ) assert result == 3 self.assertDictEqual(_UpperCAmelCase , {"x": 3, "y": 3} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "test_list = [x, add_two(x)]\ntest_list[1]" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {"add_two": add_two} , state=_UpperCAmelCase ) assert result == 5 self.assertDictEqual(_UpperCAmelCase , {"x": 3, "test_list": [3, 5]} ) __lowerCAmelCase = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" __lowerCAmelCase = {"x": 3} __lowerCAmelCase = evaluate(_UpperCAmelCase , {"add_two": add_two} , state=_UpperCAmelCase ) assert result == 5 self.assertDictEqual(_UpperCAmelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = "x = 0\nfor i in range(3):\n x = i" __lowerCAmelCase = {} __lowerCAmelCase = evaluate(_UpperCAmelCase , {"range": range} , state=_UpperCAmelCase ) assert result == 2 self.assertDictEqual(_UpperCAmelCase , {"x": 2, "i": 2} )
92
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
346
0
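# Hedged restatement of the evaluator contract the tests above exercise:
# evaluate() runs a code string against a dict of allowed tools and a mutable
# state dict, returning the value of the last statement. This mirrors the
# tests' own import path and inputs.
from transformers.tools.python_interpreter import evaluate

state = {}
result = evaluate("x = 3\ny = 5", {}, state=state)
assert result == 5
assert state == {"x": 3, "y": 5}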
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): lowercase__ : Optional[Any] = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: lowercase__ : Optional[int] = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } def lowerCamelCase__ ( _A ): '''simple docstring''' snake_case_ = (images / 2 + 0.5).clamp(0 , 1 ) snake_case_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() snake_case_ = numpy_to_pil(SCREAMING_SNAKE_CASE__ ) return images def lowerCamelCase__ ( _A ): '''simple docstring''' if images.ndim == 3: snake_case_ = images[None, ...] snake_case_ = (images * 255).round().astype("uint8" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images snake_case_ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images] else: snake_case_ = [Image.fromarray(SCREAMING_SNAKE_CASE__ ) for image in images] return pil_images
187
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
346
0
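# Hedged standalone sketch of the numpy -> PIL step used above: scale [0, 1]
# arrays to uint8 and wrap each item in a PIL image. This re-derives only the
# RGB branch; the grayscale special case above is omitted for brevity, and the
# helper name is illustrative.
import numpy as np
from PIL import Image


def numpy_to_pil_sketch(images: np.ndarray) -> list:
    if images.ndim == 3:
        images = images[None, ...]  # promote a single image to a batch
    images = (images * 255).round().astype("uint8")
    return [Image.fromarray(image) for image in images]


batch = np.random.rand(2, 64, 64, 3)  # two random RGB "images" in [0, 1]
pils = numpy_to_pil_sketch(batch)
assert len(pils) == 2 and pils[0].size == (64, 64)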
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return a list of GIF URLs from the Giphy v1 search API for a query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
104
'''simple docstring''' from timeit import timeit UpperCAmelCase_ = { 'MALAYALAM': True, 'String': False, 'rotor': True, 'level': True, 'A': True, 'BB': True, 'ABC': False, 'amanaplanacanalpanama': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = 0 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) // 2 UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE__ ) ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE__ ) <= 2: return True if s[0] == s[len(SCREAMING_SNAKE_CASE__ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return s == s[::-1] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' UpperCAmelCase__ = F'''all({name}(key) is value for key, value in test_data.items())''' UpperCAmelCase__ = F'''from __main__ import test_data, {name}''' UpperCAmelCase__ = 500000 UpperCAmelCase__ = timeit(stmt=SCREAMING_SNAKE_CASE__ , setup=SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print('a man a plan a canal panama') # finished 500,000 runs in 0.46793 seconds benchmark_function('is_palindrome_slice') # finished 500,000 runs in 0.85234 seconds benchmark_function('is_palindrome') # finished 500,000 runs in 1.32028 seconds benchmark_function('is_palindrome_recursive') # finished 500,000 runs in 2.08679 seconds benchmark_function('is_palindrome_traversal')
346
0
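# Hedged usage note for the Giphy helper above (names match the cleaned
# version): the v1 search endpoint returns {"data": [...]} only with a valid
# key, so the call is guarded until the placeholder key is replaced.
if giphy_api_key != "YOUR API KEY":
    print("\n".join(get_gifs("space ship")))
else:
    print("Set a real Giphy API key to run this example.")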
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase_ = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
251
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
346
0
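# Hedged usage sketch matching the BLEU metric's own docstring above; it
# assumes the legacy datasets.load_metric entry point this metric script
# targets.
import datasets

bleu = datasets.load_metric("bleu")
predictions = [["hello", "there", "general", "kenobi"]]
references = [[["hello", "there", "general", "kenobi"]]]
print(bleu.compute(predictions=predictions, references=references)["bleu"])  # 1.0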
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class A__ ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """""" SCREAMING_SNAKE_CASE = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self: Dict , _SCREAMING_SNAKE_CASE: Optional[DatasetInfo] = None , _SCREAMING_SNAKE_CASE: Optional[str] = None , **_SCREAMING_SNAKE_CASE: List[str] , ) -> List[str]: """simple docstring""" super().__init__(self , **_UpperCAmelCase) __lowerCAmelCase : Any = repo_info __lowerCAmelCase : int = token __lowerCAmelCase : Optional[Any] = None def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> int: """simple docstring""" if self.dir_cache is None: __lowerCAmelCase : Dict = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __lowerCAmelCase : Any = { "name": hf_file.rfilename, "size": None, "type": "file", } self.dir_cache.update( { str(_UpperCAmelCase): {"name": str(_UpperCAmelCase), "size": None, "type": "directory"} for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1] }) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: str = "rb" , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> Tuple: """simple docstring""" if not isinstance(self.repo_info , _UpperCAmelCase): raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""") __lowerCAmelCase : Dict = hf_hub_url(self.repo_info.id , _UpperCAmelCase , revision=self.repo_info.sha) return fsspec.open( _UpperCAmelCase , mode=_UpperCAmelCase , headers=get_authentication_headers_for_url(_UpperCAmelCase , use_auth_token=self.token) , client_kwargs={"trust_env": True} , ).open() def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: List[Any]) -> Dict: """simple docstring""" self._get_dirs() __lowerCAmelCase : str = self._strip_protocol(_UpperCAmelCase) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any=False , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]: """simple docstring""" self._get_dirs() __lowerCAmelCase : int = PurePosixPath(path.strip("/")) __lowerCAmelCase : Tuple = {} for p, f in self.dir_cache.items(): __lowerCAmelCase : Tuple = PurePosixPath(p.strip("/")) __lowerCAmelCase : Optional[Any] = p.parent if root == path: __lowerCAmelCase : List[str] = f __lowerCAmelCase : Dict = list(paths.values()) if detail: return out else: return sorted(f["name"] for f in out)
269
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
0
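# Hedged sketch of the URL resolution the legacy filesystem above performs:
# hf_hub_url builds the raw download URL for one file in a hub repo. The repo
# id and filename here are hypothetical placeholders.
from huggingface_hub import hf_hub_url

url = hf_hub_url(repo_id="user/dataset", filename="data/train.csv", repo_type="dataset")
print(url)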
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __a = logging.get_logger(__name__) # pylint: disable=invalid-name class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ): def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 100 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , ): if audio_length_in_s is None: lowercase : Dict = self.unet.config.sample_size / self.unet.config.sample_rate lowercase : Any = audio_length_in_s * self.unet.config.sample_rate lowercase : Optional[Any] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it\'s bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) lowercase : str = int(_UpperCAmelCase ) if sample_size % down_scale_factor != 0: lowercase : Dict = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" ''' process.''' ) lowercase : Any = int(_UpperCAmelCase ) lowercase : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype lowercase : Dict = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) lowercase : Union[str, Any] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase , device=audio.device ) lowercase : List[str] = self.scheduler.timesteps.to(_UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase : List[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample # 2. compute previous image: x_t -> t_t-1 lowercase : List[Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample lowercase : Optional[int] = audio.clamp(-1 , 1 ).float().cpu().numpy() lowercase : str = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_UpperCAmelCase )
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Compute num! recursively, memoizing intermediate results with lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
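# Quick demonstration of the memoized factorial above; `cache_info()` is part
# of the functools.lru_cache API, so the recursive calls register as cache
# hits on repeated or overlapping invocations.
if __name__ == "__main__":
    print(factorial(10))  # 3628800
    print(factorial.cache_info())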
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(key, value, full_name, weight_type, hf_model):
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model, using a pretrained config.

    The tokenizer associated with `config_name` is saved alongside the weights
    so that `save_dir` can be loaded directly with `from_pretrained`.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
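# Example invocation through python-fire's CLI wrapping of the function above
# (the script filename and the output directory are illustrative only):
#
#   python save_randomly_initialized_model.py t5-small /tmp/t5-small-random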
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
      title={Evaluating Large Language Models Trained on Code},
      author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser and Mohammad Bavarian and Clemens Winter and Philippe Tillet and Felipe Petroski Such and Dave Cummings and Matthias Plappert and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain and William Saunders and Christopher Hesse and Andrew N. Carr and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
      year={2021},
      eprint={2107.03374},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""

_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidates should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout:
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""

_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
"""simple docstring""" import math import sys import cva import numpy as np def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: lowercase__: int = math.sqrt(SCREAMING_SNAKE_CASE__ ) lowercase__: Optional[Any] = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: lowercase__: int = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: lowercase__: int = np.zeros((kernel_size, kernel_size) ) for i in range(0 , SCREAMING_SNAKE_CASE__ ): for j in range(0 , SCREAMING_SNAKE_CASE__ ): lowercase__: Tuple = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[str]: lowercase__: Any = np.zeros(img.shape ) lowercase__: Dict = get_gauss_kernel(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__, lowercase__: List[str] = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): lowercase__: Tuple = get_slice(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__: Union[str, Any] = img_s - img_s[kernel_size // 2, kernel_size // 2] lowercase__: Optional[int] = vec_gaussian(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__: Any = np.multiply(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__: int = np.multiply(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__: int = np.sum(SCREAMING_SNAKE_CASE__ ) / np.sum(SCREAMING_SNAKE_CASE__ ) lowercase__: Optional[Any] = val return imga def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__: List[str] = args[1] if args[1:] else '''../image_data/lena.jpg''' lowercase__: int = float(args[2] ) if args[2:] else 1.0 lowercase__: List[str] = float(args[3] ) if args[3:] else 1.0 if args[4:]: lowercase__: str = int(args[4] ) lowercase__: Any = kernel_size + abs(kernel_size % 2 - 1 ) else: lowercase__: Tuple = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": __A ,__A ,__A ,__A = parse_args(sys.argv) __A = cva.imread(filename, 0) cva.imshow("input image", img) __A = img / 2_5_5 __A = out.astype("float32") __A = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) __A = out * 2_5_5 __A = np.uinta(out) cva.imshow("output image", out) cva.waitKey(0) cva.destroyAllWindows()
import math


def is_prime(number: int) -> bool:
    """Check primality of `number` by trial division over odd candidates."""
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
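# Quick sanity checks for the helpers above: 91 = 7 * 13 is composite, and
# searching upward from 14 reaches 17 as the next prime.
if __name__ == "__main__":
    assert is_prime(97)
    assert not is_prime(91)
    assert next_prime(14) == 17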
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
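# Worked example of the candidate test inside solution(): (n**2 - 1) / 4 is a
# whole number exactly when n is odd, so only odd n contribute partitions.
if __name__ == "__main__":
    for n in (3, 4, 5, 6, 7):
        candidate = (n**2 - 1) / 4
        print(n, candidate, candidate == int(candidate))
    # 3 2.0 True / 4 3.75 False / 5 6.0 True / 6 8.75 False / 7 12.0 True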
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), or the smoothed variant 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
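# A small worked example tying the helpers above together: "cat" occurs twice
# in the first document and appears in 2 of the 3 documents, so
# idf = log10(3 / 2) rounds to 0.176 and tf-idf = 2 * 0.176 = 0.352.
if __name__ == "__main__":
    corpus = "the cat sat on the cat mat\nthe dog chased the cat\nbirds fly south"
    tf = term_frequency("cat", corpus.split("\n")[0])  # 2
    df, n = document_frequency("cat", corpus)  # (2, 3)
    idf = inverse_document_frequency(df, n)  # 0.176
    print(tf_idf(tf, idf))  # 0.352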
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element of `vector` through the logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
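# Example values for the vectorized sigmoid above; the outputs are symmetric
# around 0.5 for inputs symmetric around 0.
if __name__ == "__main__":
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # [0.26894142 0.5 0.73105858]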
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Iterate through each branch of the state space tree with DFS, printing a
    permutation whenever the end of the sequence is reached."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
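# Example of the in-place sort above (the random pivot only affects the
# recursion path, not the result), e.g. in a REPL:
#
#   >>> data = [5, 3, 8, 1, 9, 2]
#   >>> quick_sort_random(data, 0, len(data))
#   >>> data
#   [1, 2, 3, 5, 8, 9]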
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
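# Quick sanity check for the configuration above; all values shown are the
# defaults defined in __init__:
#
#   >>> from transformers import VivitConfig
#   >>> config = VivitConfig()
#   >>> (config.model_type, config.num_frames, config.tubelet_size)
#   ('vivit', 32, [2, 16, 16])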
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
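# Example instantiation; "resnet50" is just an illustrative timm model name:
#
#   >>> from transformers import TimmBackboneConfig
#   >>> config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   >>> (config.backbone, config.out_indices)
#   ('resnet50', (1, 2, 3, 4))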
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Tuple="<sep>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : List[str]=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : int , ): """simple docstring""" UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase__ = 3 UpperCAmelCase__ = do_lower_case UpperCAmelCase__ = remove_space UpperCAmelCase__ = keep_accents UpperCAmelCase__ = vocab_file UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) UpperCAmelCase__ = jieba UpperCAmelCase__ = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None return state def __setstate__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[Any] ): """simple docstring""" if self.remove_space: UpperCAmelCase__ = """ """.join(inputs.strip().split() ) else: UpperCAmelCase__ = inputs UpperCAmelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: UpperCAmelCase__ = unicodedata.normalize("""NFKD""" , _UpperCAmelCase ) UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: UpperCAmelCase__ = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.preprocess_text(_UpperCAmelCase ) UpperCAmelCase__ = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) UpperCAmelCase__ = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase__ = cur_pieces[1:] else: UpperCAmelCase__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Any ): """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , 
already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , """wb""" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase__ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
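# Hedged usage sketch for the tokenizer above. Its class name is mangled here;
# upstream in transformers it is CpmTokenizer, which the import below assumes,
# along with network access to the checkpoint and the jieba dependency.
# from transformers import CpmTokenizer
# tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
# ids = tokenizer("你好,世界")["input_ids"]
# print(tokenizer.decode(ids))  # _decode above maps \u2582/\u2583 back to space/newline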
346
0
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __snake_case : Union[str, Any] = logging.get_logger(__name__) class A__ ( enum.Enum ): '''simple docstring''' SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 @add_end_docstrings(lowerCamelCase_ ) class A__ ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """generated""" def __init__( self: str , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Tuple) -> Optional[int]: """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: List[str]=None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Tuple=None , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: str=None , **_SCREAMING_SNAKE_CASE: Optional[int] , ) -> Tuple: """simple docstring""" __lowerCAmelCase : Tuple = {} if truncation is not None: __lowerCAmelCase : int = truncation __lowerCAmelCase : Optional[Any] = generate_kwargs __lowerCAmelCase : Dict = {} if return_tensors is not None and return_type is None: __lowerCAmelCase : Any = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __lowerCAmelCase : int = return_type if clean_up_tokenization_spaces is not None: __lowerCAmelCase : Any = clean_up_tokenization_spaces if stop_sequence is not None: __lowerCAmelCase : Dict = self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase) if len(_UpperCAmelCase) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim.") __lowerCAmelCase : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int) -> Union[str, Any]: """simple docstring""" return True def _SCREAMING_SNAKE_CASE ( self: Dict , *_SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[str]) -> Any: """simple docstring""" __lowerCAmelCase : int = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , _UpperCAmelCase): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input") __lowerCAmelCase : List[Any] = ([prefix + arg for arg in args[0]],) __lowerCAmelCase : List[str] = True elif isinstance(args[0] , _UpperCAmelCase): __lowerCAmelCase : Union[str, Any] = (prefix + args[0],) __lowerCAmelCase : str = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""") __lowerCAmelCase : Union[str, Any] = self.tokenizer(*_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors=self.framework) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self: str , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: str) -> Tuple: """simple docstring""" __lowerCAmelCase : Optional[Any] = super().__call__(*_UpperCAmelCase , **_UpperCAmelCase) if ( isinstance(args[0] , _UpperCAmelCase) and all(isinstance(_UpperCAmelCase , _UpperCAmelCase) for el in args[0]) and all(len(_UpperCAmelCase) == 1 for res in result) ): return [res[0] for res in result] return result def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Dict = self._parse_and_tokenize(_UpperCAmelCase , truncation=_UpperCAmelCase , **_UpperCAmelCase) return inputs def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]: """simple docstring""" if self.framework == "pt": __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = model_inputs["input_ids"].shape elif self.framework == "tf": __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = tf.shape(model_inputs["input_ids"]).numpy() __lowerCAmelCase : Union[str, Any] = generate_kwargs.get("min_length" , self.model.config.min_length) __lowerCAmelCase : List[str] = generate_kwargs.get("max_length" , self.model.config.max_length) self.check_inputs(_UpperCAmelCase , generate_kwargs["min_length"] , generate_kwargs["max_length"]) __lowerCAmelCase : Dict = self.model.generate(**_UpperCAmelCase , **_UpperCAmelCase) __lowerCAmelCase : Optional[Any] = output_ids.shape[0] if self.framework == "pt": __lowerCAmelCase : Any = output_ids.reshape(_UpperCAmelCase , out_b // in_b , *output_ids.shape[1:]) elif self.framework == "tf": __lowerCAmelCase : List[str] = tf.reshape(_UpperCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:])) return {"output_ids": output_ids} def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int=ReturnType.TEXT , _SCREAMING_SNAKE_CASE: Tuple=False) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : List[Any] = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __lowerCAmelCase : Dict = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __lowerCAmelCase : Optional[int] = { F"""{self.return_name}_text""": self.tokenizer.decode( _UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , ) } records.append(_UpperCAmelCase) return records @add_end_docstrings(lowerCamelCase_ ) class A__ ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """summary""" def __call__( self: List[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: str) -> Optional[Any]: """simple docstring""" return super().__call__(*_UpperCAmelCase , **_UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int) -> Union[str, Any]: """simple docstring""" if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your 
max_length={max_length}.""") if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})""") @add_end_docstrings(lowerCamelCase_ ) class A__ ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """translation""" def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int) -> Any: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator('...', max_length=400)") return True def _SCREAMING_SNAKE_CASE ( self: List[Any] , *_SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict=TruncationStrategy.DO_NOT_TRUNCATE , _SCREAMING_SNAKE_CASE: str=None , _SCREAMING_SNAKE_CASE: List[Any]=None) -> Any: """simple docstring""" if getattr(self.tokenizer , "_build_translation_inputs" , _UpperCAmelCase): return self.tokenizer._build_translation_inputs( *_UpperCAmelCase , return_tensors=self.framework , truncation=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase) else: return super()._parse_and_tokenize(*_UpperCAmelCase , truncation=_UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=None , **_SCREAMING_SNAKE_CASE: str) -> Tuple: """simple docstring""" __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = super()._sanitize_parameters(**_UpperCAmelCase) if src_lang is not None: __lowerCAmelCase : str = src_lang if tgt_lang is not None: __lowerCAmelCase : Tuple = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __lowerCAmelCase : int = kwargs.get("task" , self.task) __lowerCAmelCase : Optional[int] = task.split("_") if task and len(_UpperCAmelCase) == 4: # translation, XX, to YY __lowerCAmelCase : List[Any] = items[1] __lowerCAmelCase : Any = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self: Tuple , *_SCREAMING_SNAKE_CASE: List[str] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]: """simple docstring""" return super().__call__(*_UpperCAmelCase , **_UpperCAmelCase)
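# Hedged usage sketch for the Text2Text/Summarization/Translation pipelines above,
# via the high-level pipeline() factory; the task defaults resolve to real
# checkpoints only at runtime, so this is illustrative:
# from transformers import pipeline
# summarizer = pipeline("summarization")
# summarizer("A long article ...", max_length=60, min_length=10)
# translator = pipeline("translation_en_to_fr")
# translator("How old are you?", max_length=40)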
269
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase_ = logging.getLogger(__name__) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=SCREAMING_SNAKE_CASE__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=SCREAMING_SNAKE_CASE__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=SCREAMING_SNAKE_CASE__ , default=1000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=SCREAMING_SNAKE_CASE__ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=SCREAMING_SNAKE_CASE__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=SCREAMING_SNAKE_CASE__ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) UpperCAmelCase__ = parser.parse_args() return args def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' def fn(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples["""text"""] ) return fn def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ = [] for i in range(len(tokenized_data["""input_ids"""] ) ): UpperCAmelCase__ = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } UpperCAmelCase__ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = example.SerializeToString() records.append(SCREAMING_SNAKE_CASE__ ) return records def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' UpperCAmelCase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: UpperCAmelCase__ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit ) UpperCAmelCase__ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) if not os.path.exists(SCREAMING_SNAKE_CASE__ ): os.makedirs(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. UpperCAmelCase__ = tokenize_function(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(SCREAMING_SNAKE_CASE__ : int ): # Concatenate all texts. UpperCAmelCase__ = {k: sum(examples[k] , [] ) for k in examples.keys()} UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase__ = { k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase__ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ): UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase__ = len(dataset_snapshot["""input_ids"""] ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) UpperCAmelCase__ = get_serialized_examples(SCREAMING_SNAKE_CASE__ ) with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file: for i in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase__ = serialized_examples[i] out_file.write(SCREAMING_SNAKE_CASE__ ) print("""Wrote file {} containing {} records""".format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = parse_args() main(args)
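# Sketch: reading back one shard written by the script above. The shard filename
# and the fixed length of 512 (the script's default --max_length) are assumptions.
import tensorflow as tf

feature_description = {
    "input_ids": tf.io.FixedLenFeature([512], tf.int64),
    "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
}


def decode_fn(serialized_example):
    return tf.io.parse_single_example(serialized_example, feature_description)


dataset = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord").map(decode_fn)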
346
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegaForCausalLM''', '''MegaForMaskedLM''', '''MegaForMultipleChoice''', '''MegaForQuestionAnswering''', '''MegaForSequenceClassification''', '''MegaForTokenClassification''', '''MegaModel''', '''MegaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
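# The module above defers its heavy torch imports until first attribute access.
# A minimal standalone sketch of the same lazy-import idea (simplified relative
# to the real transformers._LazyModule):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")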
337
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCAmelCase_ = '\\n\n' UpperCAmelCase_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' UpperCAmelCase_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int = 16 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase__ = """cuda""" else: UpperCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.to(_UpperCAmelCase ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_UpperCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase__ = model.config.max_length - 1 else: UpperCAmelCase__ = model.config.max_length UpperCAmelCase__ = tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase ) UpperCAmelCase__ = encodings["""input_ids"""] UpperCAmelCase__ = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase__ = [] UpperCAmelCase__ = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ): UpperCAmelCase__ = min(start_index + batch_size , len(_UpperCAmelCase ) ) UpperCAmelCase__ = encoded_texts[start_index:end_index] UpperCAmelCase__ = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase ) UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_UpperCAmelCase ), attn_mask] , dim=1 ) UpperCAmelCase__ = encoded_batch with torch.no_grad(): UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits UpperCAmelCase__ = out_logits[..., :-1, :].contiguous() UpperCAmelCase__ = labels[..., 1:].contiguous() UpperCAmelCase__ = attn_mask[..., 1:].contiguous() UpperCAmelCase__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
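# Minimal sketch of the per-sequence quantity the loop above accumulates:
# perplexity = exp(mean negative log-likelihood over non-masked token positions).
import numpy as np


def perplexity_from_token_nlls(token_nlls, mask):
    token_nlls = np.asarray(token_nlls, dtype=np.float64)
    mask = np.asarray(mask, dtype=np.float64)
    return float(np.exp((token_nlls * mask).sum() / mask.sum()))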
346
0
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE__ (lowerCamelCase_ ): __lowerCamelCase : Any = ["""image_processor""", """tokenizer"""] __lowerCamelCase : Dict = """AutoImageProcessor""" __lowerCamelCase : Any = """AutoTokenizer""" def __init__( self , a=None , a=None , **a): lowercase__ : int = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _UpperCAmelCase , ) lowercase__ : Dict = kwargs.pop('feature_extractor') lowercase__ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(_UpperCAmelCase , _UpperCAmelCase) lowercase__ : Union[str, Any] = self.image_processor lowercase__ : Dict = False def __call__( self , *a , **a): if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase) lowercase__ : Union[str, Any] = kwargs.pop('images' , _UpperCAmelCase) lowercase__ : Tuple = kwargs.pop('text' , _UpperCAmelCase) if len(_UpperCAmelCase) > 0: lowercase__ : Dict = args[0] lowercase__ : List[str] = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.') if images is not None: lowercase__ : Union[str, Any] = self.image_processor(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase) if text is not None: lowercase__ : Tuple = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase) if text is None: return inputs elif images is None: return encodings else: lowercase__ : Tuple = encodings['input_ids'] return inputs def snake_case_ ( self , *a , **a): return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase) def snake_case_ ( self , *a , **a): return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase) @contextmanager def snake_case_ ( self): warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your images inputs, or in a separate call.') lowercase__ : Dict = True lowercase__ : List[str] = self.tokenizer yield lowercase__ : str = self.image_processor lowercase__ : Tuple = False def snake_case_ ( self , a , a=False , a=None): if added_vocab is None: lowercase__ : Tuple = self.tokenizer.get_added_vocab() lowercase__ : Dict = {} while tokens: lowercase__ : str = re.search(r'<s_(.*?)>' , _UpperCAmelCase , re.IGNORECASE) if start_token is None: break lowercase__ : Tuple = start_token.group(1) lowercase__ : Optional[Any] = re.search(rf"""</s_{key}>""" , _UpperCAmelCase , re.IGNORECASE) lowercase__ : Any = start_token.group() if end_token is None: lowercase__ : Union[str, Any] = tokens.replace(_UpperCAmelCase , '') else: lowercase__ : Tuple = end_token.group() lowercase__ : Dict = re.escape(_UpperCAmelCase) lowercase__ : Optional[Any] = re.escape(_UpperCAmelCase) lowercase__ : Dict = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , _UpperCAmelCase , re.IGNORECASE) if content is not None: lowercase__ : str = content.group(1).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node lowercase__ : Optional[int] = self.tokenajson(_UpperCAmelCase , is_inner_value=_UpperCAmelCase , added_vocab=_UpperCAmelCase) if value: if len(_UpperCAmelCase) == 1: lowercase__ : Tuple = value[0] lowercase__ : Optional[int] = value else: # leaf nodes lowercase__ : Any = [] for leaf in content.split(r'<sep/>'): lowercase__ : Any = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": lowercase__ : Tuple = leaf[1:-2] # for categorical special tokens output[key].append(_UpperCAmelCase) if len(output[key]) == 1: lowercase__ : Optional[Any] = output[key][0] lowercase__ : Optional[Any] = tokens[tokens.find(_UpperCAmelCase) + len(_UpperCAmelCase) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_UpperCAmelCase , added_vocab=_UpperCAmelCase) if len(_UpperCAmelCase): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def snake_case_ ( self): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , ) return self.image_processor_class @property def snake_case_ ( self): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , ) return self.image_processor
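# Hedged round-trip sketch for the tag parser above (named `tokenajson` here,
# `token2json` upstream); the tag scheme and the values below are illustrative:
# sequence = "<s_menu><s_name>latte</s_name><s_price>3.00</s_price></s_menu>"
# processor.tokenajson(sequence)
# # -> {"menu": {"name": "latte", "price": "3.00"}}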
214
'''simple docstring'''


def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    # phi[i] will hold Euler's totient of i once the sieve below finishes
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so correct the totients of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
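# Quick check of the sieve above against a direct gcd count; the sum of phi(k)
# for 2 <= k <= limit counts the reduced proper fractions of Project Euler 72.
from math import gcd


def _brute_force_count(limit):
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)


assert solution(8) == _brute_force_count(8) == 21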
346
0
from string import ascii_lowercase, ascii_uppercase


def _UpperCamelCase(sentence: str) -> str:
    if not sentence:
        return ""
    # map each lowercase ASCII letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
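# Example behaviour of the helper above: only an ASCII lowercase first character
# is mapped; anything else passes through unchanged.
assert _UpperCamelCase("hello world") == "Hello world"
assert _UpperCamelCase("123 abc") == "123 abc"
assert _UpperCamelCase("") == ""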
9
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ): """simple docstring""" UpperCAmelCase__ = {} if top_k is not None: UpperCAmelCase__ = top_k return {}, {}, postprocess_params def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ): """simple docstring""" return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = load_image(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) elif self.framework == "tf": UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
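# Hedged usage sketch for the image-classification pipeline above via the
# high-level factory; the checkpoint name is an illustrative example only:
# from transformers import pipeline
# classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
# classifier("path/to/image.jpg", top_k=3)
# # -> [{"score": ..., "label": ...}, ...]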
346
0
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem __A = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 __A = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: if "://" in dataset_path: lowercase__: int = dataset_path.split('''://''' )[1] return dataset_path def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any: if fs is not None and fs.protocol != "file": return True else: return False def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: lowercase__: Union[str, Any] = not is_remote_filesystem(SCREAMING_SNAKE_CASE__ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(SCREAMING_SNAKE_CASE__ ) , fs._strip_protocol(SCREAMING_SNAKE_CASE__ ) ) else: fs.mv(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , recursive=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( ) -> Any: if hasattr(fsspec.asyn , '''reset_lock''' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: lowercase__: List[Any] = None lowercase__: Dict = None lowercase__: List[str] = threading.Lock()
177
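# Example behaviour of the first path helper in the filesystem utilities above
# (upstream: extract_path_from_uri): the protocol prefix is stripped, while a
# plain local path is returned unchanged.
# "s3://my-bucket/datasets/train" -> "my-bucket/datasets/train"
# "relative/local/path"           -> "relative/local/path"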
'''simple docstring'''
from math import factorial


def solution(n: int = 20) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
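# Sanity check: a 2x2 grid has C(4, 2) = 6 monotonic lattice paths (the classic
# Project Euler problem 15 instance that the script above solves for n = 20).
assert solution(2) == 6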
346
0
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def __UpperCamelCase ( UpperCAmelCase ): if ( (cp >= 0x4_e00 and cp <= 0x9_fff) or (cp >= 0x3_400 and cp <= 0x4_dbf) # or (cp >= 0x20_000 and cp <= 0x2a_6df) # or (cp >= 0x2a_700 and cp <= 0x2b_73f) # or (cp >= 0x2b_740 and cp <= 0x2b_81f) # or (cp >= 0x2b_820 and cp <= 0x2c_eaf) # or (cp >= 0xf_900 and cp <= 0xf_aff) or (cp >= 0x2f_800 and cp <= 0x2f_a1f) # ): # return True return False def __UpperCamelCase ( UpperCAmelCase ): for char in word: lowercase__ : int = ord(SCREAMING_SNAKE_CASE__ ) if not _is_chinese_char(SCREAMING_SNAKE_CASE__ ): return 0 return 1 def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : Dict = set() for token in tokens: lowercase__ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) > 1 and is_chinese(SCREAMING_SNAKE_CASE__ ) if chinese_word: word_set.add(SCREAMING_SNAKE_CASE__ ) lowercase__ : str = list(SCREAMING_SNAKE_CASE__ ) return word_list def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): if not chinese_word_set: return bert_tokens lowercase__ : Any = max([len(SCREAMING_SNAKE_CASE__ ) for w in chinese_word_set] ) lowercase__ : Tuple = bert_tokens lowercase__ , lowercase__ : int = 0, len(SCREAMING_SNAKE_CASE__ ) while start < end: lowercase__ : List[str] = True if is_chinese(bert_word[start] ): lowercase__ : Optional[int] = min(end - start , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ , 1 , -1 ): lowercase__ : Dict = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowercase__ : Dict = '''##''' + bert_word[j] lowercase__ : Tuple = start + i lowercase__ : List[str] = False break if single_word: start += 1 return bert_word def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowercase__ : str = [] for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 100 ): lowercase__ : int = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws lowercase__ : List[str] = [get_chinese_word(SCREAMING_SNAKE_CASE__ ) for r in res] ltp_res.extend(SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) lowercase__ : List[Any] = [] for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 100 ): lowercase__ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) lowercase__ : Optional[int] = [] for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase__ : int = [] for id in input_ids: lowercase__ : List[Any] = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) input_tokens.append(SCREAMING_SNAKE_CASE__ ) lowercase__ : str = add_sub_symbol(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__ : List[Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ): if token[:2] == "##": lowercase__ : List[Any] = token[2:] # save chinese tokens' pos if len(SCREAMING_SNAKE_CASE__ ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE__ ) ): ref_id.append(SCREAMING_SNAKE_CASE__ ) ref_ids.append(SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) return ref_ids def __UpperCamelCase ( UpperCAmelCase ): with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowercase__ : Union[str, Any] = f.readlines() lowercase__ : List[str] = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowercase__ : List[str] = LTP(args.ltp ) # faster in GPU device lowercase__ : Tuple = BertTokenizer.from_pretrained(args.bert ) lowercase__ : Any = prepare_ref(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowercase__ : str = [json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' for ref in ref_ids] f.writelines(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": __a: Optional[int] = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", required=False, type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", required=False, type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""", ) parser.add_argument( """--bert""", required=False, type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""", ) parser.add_argument( """--save_path""", required=False, type=str, default="""./resources/ref.txt""", help="""path to save res""", ) __a: Union[str, Any] = parser.parse_args() main(args)
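# Illustrative I/O for the script above (values hypothetical): each output line
# is a JSON list of the indices of "##"-prefixed Chinese sub-tokens for one input
# line, the reference format consumed by the whole-word-masking training scripts:
# "我喜欢北京"  ->  [2, 4, 5]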
198
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
346
0
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[int]: if is_torch_version("<" , "2.0.0" ) or not hasattr(SCREAMING_SNAKE_CASE__ , "_dynamo" ): return False return isinstance(SCREAMING_SNAKE_CASE__ , torch._dynamo.eval_frame.OptimizedModule ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = True ) -> List[str]: UpperCamelCase__ : List[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) UpperCamelCase__ : str = is_compiled_module(SCREAMING_SNAKE_CASE__ ) if is_compiled: UpperCamelCase__ : Optional[Any] = model UpperCamelCase__ : List[Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCamelCase__ : List[str] = model.module if not keep_fpaa_wrapper: UpperCamelCase__ : str = getattr(SCREAMING_SNAKE_CASE__ , "forward" ) UpperCamelCase__ : Any = model.__dict__.pop("_original_forward" , SCREAMING_SNAKE_CASE__ ) if original_forward is not None: while hasattr(SCREAMING_SNAKE_CASE__ , "__wrapped__" ): UpperCamelCase__ : str = forward.__wrapped__ if forward == original_forward: break UpperCamelCase__ : Optional[int] = forward if getattr(SCREAMING_SNAKE_CASE__ , "_converted_to_transformer_engine" , SCREAMING_SNAKE_CASE__ ): convert_model(SCREAMING_SNAKE_CASE__ , to_transformer_engine=SCREAMING_SNAKE_CASE__ ) if is_compiled: UpperCamelCase__ : int = model UpperCamelCase__ : Optional[Any] = compiled_model return model def SCREAMING_SNAKE_CASE ( ) -> Dict: PartialState().wait_for_everyone() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int: if PartialState().distributed_type == DistributedType.TPU: xm.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif PartialState().local_process_index == 0: torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @contextmanager def SCREAMING_SNAKE_CASE ( **__lowerCAmelCase ) -> Union[str, Any]: for key, value in kwargs.items(): UpperCamelCase__ : Dict = str(SCREAMING_SNAKE_CASE__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]: if not hasattr(SCREAMING_SNAKE_CASE__ , "__qualname__" ) and not hasattr(SCREAMING_SNAKE_CASE__ , "__name__" ): UpperCamelCase__ : List[str] = getattr(SCREAMING_SNAKE_CASE__ , "__class__" , SCREAMING_SNAKE_CASE__ ) if hasattr(SCREAMING_SNAKE_CASE__ , "__qualname__" ): return obj.__qualname__ if hasattr(SCREAMING_SNAKE_CASE__ , "__name__" ): return obj.__name__ return str(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: for key, value in source.items(): if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCamelCase__ : List[str] = destination.setdefault(SCREAMING_SNAKE_CASE__ , {} ) merge_dicts(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: UpperCamelCase__ : List[str] = value return destination def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = None ) -> Optional[int]: if port is None: 
UpperCamelCase__ : Optional[Any] = 2_9500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
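# Sketch: the last helper above probes whether a local TCP port is already bound
# (connect_ex(...) == 0 means something accepted the connection). Hypothetical guard:
# if _port_in_use(29500):  # name is illustrative; the real name is mangled above
#     raise RuntimeError("Port 29500 is busy; pick another main process port.")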
189
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
346
0
from collections.abc import Callable

import numpy as np


def heun_method(
    ode_func: Callable,
    y0: float,
    x0: float,
    x_end: float,
    step_size: float,
) -> np.ndarray:
    """Solve an ODE y' = f(x, y) with Heun's method (predictor-corrector)."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor: one explicit Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
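# A minimal usage sketch for the solver above, using the reconstructed signature
# heun_method(ode_func, y0, x0, x_end, step_size). Solving y' = y on [0, 1]
# should approach e ~ 2.71828 (the test function and tolerance are illustrative):
import numpy as np

def exp_ode(x: float, y: float) -> float:
    return y  # exact solution is e**x

ys = heun_method(exp_ode, y0=1.0, x0=0.0, x_end=1.0, step_size=0.01)
print(ys[-1])                     # ~2.71828
print(abs(ys[-1] - np.e) < 1e-3)  # True: the method is second-order accurate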
92
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow

logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
346
0
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
187
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
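# A sketch of what patch_submodule does, shown on a throwaway module so the
# example is self-contained. The throwaway module and the mock are illustrative
# assumptions; the real tests above patch the package's own _test_patching module:
import types

from datasets.utils.patching import patch_submodule

mod = types.ModuleType("throwaway")
exec("import os\n\ndef join_demo():\n    return os.path.join('a', 'b')", mod.__dict__)

def fake_join(*parts):
    return "MOCK:" + "/".join(parts)

with patch_submodule(mod, "os.path.join", fake_join):
    print(mod.join_demo())  # "MOCK:a/b" while the patch is active
print(mod.join_demo())      # the real os.path.join is restored afterwards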
346
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class lowercase_ (lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = """wavlm""" def __init__( self : Optional[Any] ,lowercase__ : Any=3_2 ,lowercase__ : int=7_6_8 ,lowercase__ : Any=1_2 ,lowercase__ : Union[str, Any]=1_2 ,lowercase__ : Union[str, Any]=3_0_7_2 ,lowercase__ : Union[str, Any]="gelu" ,lowercase__ : List[str]=0.1 ,lowercase__ : str=0.1 ,lowercase__ : str=0.1 ,lowercase__ : Dict=0.0 ,lowercase__ : str=0.1 ,lowercase__ : List[str]=0.1 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=1e-5 ,lowercase__ : int="group" ,lowercase__ : int="gelu" ,lowercase__ : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,lowercase__ : Tuple=(5, 2, 2, 2, 2, 2, 2) ,lowercase__ : str=(1_0, 3, 3, 3, 3, 2, 2) ,lowercase__ : Dict=False ,lowercase__ : List[str]=1_2_8 ,lowercase__ : Dict=1_6 ,lowercase__ : List[str]=3_2_0 ,lowercase__ : List[str]=8_0_0 ,lowercase__ : int=False ,lowercase__ : Optional[Any]=True ,lowercase__ : Any=0.0_5 ,lowercase__ : Any=1_0 ,lowercase__ : List[str]=2 ,lowercase__ : Optional[Any]=0.0 ,lowercase__ : Union[str, Any]=1_0 ,lowercase__ : Optional[Any]=3_2_0 ,lowercase__ : Optional[Any]=2 ,lowercase__ : List[str]=0.1 ,lowercase__ : int=1_0_0 ,lowercase__ : int=2_5_6 ,lowercase__ : List[str]=2_5_6 ,lowercase__ : Dict=0.1 ,lowercase__ : Union[str, Any]="mean" ,lowercase__ : Optional[int]=False ,lowercase__ : str=False ,lowercase__ : List[str]=2_5_6 ,lowercase__ : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) ,lowercase__ : Optional[int]=(5, 3, 3, 1, 1) ,lowercase__ : Dict=(1, 2, 3, 1, 1) ,lowercase__ : Any=5_1_2 ,lowercase__ : Tuple=8_0 ,lowercase__ : str=0 ,lowercase__ : List[str]=1 ,lowercase__ : Optional[Any]=2 ,lowercase__ : List[Any]=False ,lowercase__ : int=3 ,lowercase__ : Dict=2 ,lowercase__ : List[Any]=3 ,lowercase__ : str=None ,**lowercase__ : List[Any] ,): super().__init__(**_UpperCAmelCase ,pad_token_id=_UpperCAmelCase ,bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ) __lowercase = hidden_size __lowercase = feat_extract_norm __lowercase = feat_extract_activation __lowercase = list(_UpperCAmelCase ) __lowercase = list(_UpperCAmelCase ) __lowercase = list(_UpperCAmelCase ) __lowercase = conv_bias __lowercase = num_buckets __lowercase = max_bucket_distance __lowercase = num_conv_pos_embeddings __lowercase = num_conv_pos_embedding_groups __lowercase = len(self.conv_dim ) __lowercase = num_hidden_layers __lowercase = intermediate_size __lowercase = hidden_act __lowercase = num_attention_heads __lowercase = hidden_dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = feat_proj_dropout __lowercase = final_dropout __lowercase = layerdrop __lowercase = layer_norm_eps __lowercase = initializer_range __lowercase = num_ctc_classes __lowercase = vocab_size __lowercase = do_stable_layer_norm __lowercase = use_weighted_layer_sum __lowercase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowercase = apply_spec_augment __lowercase = mask_time_prob __lowercase = mask_time_length __lowercase = mask_time_min_masks __lowercase = mask_feature_prob __lowercase = mask_feature_length # parameters for pretraining with codevector quantized representations __lowercase = num_codevectors_per_group __lowercase = num_codevector_groups __lowercase = contrastive_logits_temperature __lowercase = num_negatives __lowercase = codevector_dim __lowercase = proj_codevector_dim __lowercase = diversity_loss_weight # ctc loss __lowercase = ctc_loss_reduction __lowercase = ctc_zero_infinity # adapter __lowercase = add_adapter __lowercase = adapter_kernel_size __lowercase = adapter_stride __lowercase = num_adapter_layers __lowercase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowercase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowercase = list(_UpperCAmelCase ) __lowercase = list(_UpperCAmelCase ) __lowercase = list(_UpperCAmelCase ) __lowercase = xvector_output_dim @property def SCREAMING_SNAKE_CASE ( self : int ): return functools.reduce(operator.mul ,self.conv_stride ,1 )
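# The public transformers API exposes this configuration as WavLMConfig; a
# minimal usage sketch with a few defaults overridden (the override values are
# illustrative):
from transformers import WavLMConfig, WavLMModel

config = WavLMConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
model = WavLMModel(config)             # randomly initialised weights
print(config.num_feat_extract_layers)  # derived from len(conv_dim); 7 by default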
104
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome_traversal(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s: str) -> bool:
    mid = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of the string,
    # as we can get access to the i'th last element from the i'th index.
    # eg: [0,1,2,3,4,5] => the 4th index can be accessed
    # with the help of the 1st index (i == n - i - 1),
    # where n is the length of the string.
    return all(s[i] == s[n - i - 1] for i in range(mid))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
346
0
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _a ( lowerCamelCase_ ): '''simple docstring''' A : Dict = ["""image_processor"""] A : int = """SamImageProcessor""" def __init__( self, A ): '''simple docstring''' super().__init__(_UpperCAmelCase ) SCREAMING_SNAKE_CASE : int = self.image_processor SCREAMING_SNAKE_CASE : Dict = -10 SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.size['longest_edge'] def __call__( self, A=None, A=None, A=None, A=None, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor( _UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase, ) # pop arguments that are not used in the foward but used nevertheless SCREAMING_SNAKE_CASE : List[str] = encoding_image_processor['original_sizes'] if hasattr(_UpperCAmelCase, 'numpy' ): # Checks if Torch or TF tensor SCREAMING_SNAKE_CASE : Tuple = original_sizes.numpy() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._check_and_preprocess_points( input_points=_UpperCAmelCase, input_labels=_UpperCAmelCase, input_boxes=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE : Dict = self._normalize_and_convert( _UpperCAmelCase, _UpperCAmelCase, input_points=_UpperCAmelCase, input_labels=_UpperCAmelCase, input_boxes=_UpperCAmelCase, return_tensors=_UpperCAmelCase, ) return encoding_image_processor def UpperCamelCase_ ( self, A, A, A=None, A=None, A=None, A="pt", ): '''simple docstring''' if input_points is not None: if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): SCREAMING_SNAKE_CASE : Dict = [ self._normalize_coordinates(self.target_size, _UpperCAmelCase, original_sizes[0] ) for point in input_points ] else: SCREAMING_SNAKE_CASE : str = [ self._normalize_coordinates(self.target_size, _UpperCAmelCase, _UpperCAmelCase ) for point, original_size in zip(_UpperCAmelCase, _UpperCAmelCase ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._pad_points_and_labels(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(_UpperCAmelCase ) if input_labels is not None: SCREAMING_SNAKE_CASE : Optional[Any] = np.array(_UpperCAmelCase ) if input_boxes is not None: if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): SCREAMING_SNAKE_CASE : Dict = [ self._normalize_coordinates(self.target_size, _UpperCAmelCase, original_sizes[0], is_bounding_box=_UpperCAmelCase ) for box in input_boxes ] else: SCREAMING_SNAKE_CASE : str = [ self._normalize_coordinates(self.target_size, _UpperCAmelCase, _UpperCAmelCase, is_bounding_box=_UpperCAmelCase ) for box, original_size in zip(_UpperCAmelCase, _UpperCAmelCase ) ] SCREAMING_SNAKE_CASE : Optional[int] = np.array(_UpperCAmelCase ) if input_boxes is not None: if return_tensors == "pt": SCREAMING_SNAKE_CASE : int = torch.from_numpy(_UpperCAmelCase ) # boxes batch size of 1 by default SCREAMING_SNAKE_CASE : Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor(_UpperCAmelCase ) # boxes batch size of 1 
by default SCREAMING_SNAKE_CASE : Dict = tf.expand_dims(_UpperCAmelCase, 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'input_boxes': input_boxes} ) if input_points is not None: if return_tensors == "pt": SCREAMING_SNAKE_CASE : str = torch.from_numpy(_UpperCAmelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE : List[str] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor(_UpperCAmelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE : List[str] = tf.expand_dims(_UpperCAmelCase, 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({'input_points': input_points} ) if input_labels is not None: if return_tensors == "pt": SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(_UpperCAmelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE : Dict = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(_UpperCAmelCase ) # point batch size of 1 by default SCREAMING_SNAKE_CASE : str = tf.expand_dims(_UpperCAmelCase, 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'input_labels': input_labels} ) return encoding_image_processor def UpperCamelCase_ ( self, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = max([point.shape[0] for point in input_points] ) SCREAMING_SNAKE_CASE : int = [] for i, point in enumerate(_UpperCAmelCase ): if point.shape[0] != expected_nb_points: SCREAMING_SNAKE_CASE : int = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value], axis=0 ) SCREAMING_SNAKE_CASE : Dict = np.append(input_labels[i], [self.point_pad_value] ) processed_input_points.append(_UpperCAmelCase ) SCREAMING_SNAKE_CASE : Dict = processed_input_points return input_points, input_labels def UpperCamelCase_ ( self, A, A, A, A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = original_size SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.image_processor._get_preprocess_shape(_UpperCAmelCase, longest_edge=_UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = deepcopy(_UpperCAmelCase ).astype(_UpperCAmelCase ) if is_bounding_box: SCREAMING_SNAKE_CASE : Union[str, Any] = coords.reshape(-1, 2, 2 ) SCREAMING_SNAKE_CASE : int = coords[..., 0] * (new_w / old_w) SCREAMING_SNAKE_CASE : Optional[int] = coords[..., 1] * (new_h / old_h) if is_bounding_box: SCREAMING_SNAKE_CASE : Optional[int] = coords.reshape(-1, 4 ) return coords def UpperCamelCase_ ( self, A=None, A=None, A=None, ): '''simple docstring''' if input_points is not None: if hasattr(_UpperCAmelCase, 'numpy' ): # Checks for TF or Torch tensor SCREAMING_SNAKE_CASE : List[Any] = input_points.numpy().tolist() if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or not isinstance(input_points[0], _UpperCAmelCase ): raise ValueError('Input points must be a list of list of floating points.' 
) SCREAMING_SNAKE_CASE : Optional[int] = [np.array(_UpperCAmelCase ) for input_point in input_points] else: SCREAMING_SNAKE_CASE : int = None if input_labels is not None: if hasattr(_UpperCAmelCase, 'numpy' ): SCREAMING_SNAKE_CASE : str = input_labels.numpy().tolist() if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or not isinstance(input_labels[0], _UpperCAmelCase ): raise ValueError('Input labels must be a list of list integers.' ) SCREAMING_SNAKE_CASE : int = [np.array(_UpperCAmelCase ) for label in input_labels] else: SCREAMING_SNAKE_CASE : Any = None if input_boxes is not None: if hasattr(_UpperCAmelCase, 'numpy' ): SCREAMING_SNAKE_CASE : int = input_boxes.numpy().tolist() if ( not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or not isinstance(input_boxes[0], _UpperCAmelCase ) or not isinstance(input_boxes[0][0], _UpperCAmelCase ) ): raise ValueError('Input boxes must be a list of list of list of floating points.' ) SCREAMING_SNAKE_CASE : int = [np.array(_UpperCAmelCase ).astype(np.floataa ) for box in input_boxes] else: SCREAMING_SNAKE_CASE : Tuple = None return input_points, input_labels, input_boxes @property def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.image_processor.model_input_names return list(dict.fromkeys(_UpperCAmelCase ) ) def UpperCamelCase_ ( self, *A, **A ): '''simple docstring''' return self.image_processor.post_process_masks(*_UpperCAmelCase, **_UpperCAmelCase )
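# The coordinate handling in _normalize_coordinates above reduces to rescaling
# (x, y) points from the original image size to the resized size. A standalone
# sketch with toy sizes (this simplification is mine, not the processor itself,
# and it skips the aspect-ratio-preserving resize the processor computes first):
import numpy as np

def rescale_points(coords: np.ndarray, original_size, target_size) -> np.ndarray:
    old_h, old_w = original_size
    new_h, new_w = target_size
    out = coords.astype(np.float64).copy()
    out[..., 0] *= new_w / old_w  # x coordinates
    out[..., 1] *= new_h / old_h  # y coordinates
    return out

points = np.array([[100.0, 200.0], [50.0, 50.0]])
print(rescale_points(points, original_size=(400, 600), target_size=(1024, 1024)))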
251
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
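# The metric's docstring above already shows the intended call pattern; condensed
# here. Note that datasets.load_metric is the entry point assumed by this file
# and has since been deprecated in favour of the separate `evaluate` library:
import datasets

predictions = [["hello", "there", "general", "kenobi"]]
references = [[["hello", "there", "general", "kenobi"], ["hello", "there", "!"]]]
bleu = datasets.load_metric("bleu")
print(bleu.compute(predictions=predictions, references=references)["bleu"])  # 1.0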
346
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = KandinskyVaaControlnetPipeline SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""] SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""] SCREAMING_SNAKE_CASE = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] SCREAMING_SNAKE_CASE = False @property def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]: """simple docstring""" return 32 @property def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]: """simple docstring""" return 32 @property def _SCREAMING_SNAKE_CASE ( self: Any) -> List[str]: """simple docstring""" return self.time_input_dim @property def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any: """simple docstring""" return self.time_input_dim * 4 @property def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[Any]: """simple docstring""" return 100 @property def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict: """simple docstring""" torch.manual_seed(0) __lowerCAmelCase : Optional[Any] = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowerCAmelCase : Dict = UNetaDConditionModel(**_UpperCAmelCase) return model @property def _SCREAMING_SNAKE_CASE ( self: str) -> Any: """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any: """simple docstring""" torch.manual_seed(0) __lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs) return model def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Tuple = self.dummy_unet __lowerCAmelCase : Dict = self.dummy_movq __lowerCAmelCase : Any = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , 
clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_UpperCAmelCase , ) __lowerCAmelCase : str = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any]=0) -> int: """simple docstring""" __lowerCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase)).to(_UpperCAmelCase) __lowerCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( _UpperCAmelCase) # create hint __lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase)).to(_UpperCAmelCase) if str(_UpperCAmelCase).startswith("mps"): __lowerCAmelCase : Optional[Any] = torch.manual_seed(_UpperCAmelCase) else: __lowerCAmelCase : Dict = torch.Generator(device=_UpperCAmelCase).manual_seed(_UpperCAmelCase) __lowerCAmelCase : int = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def _SCREAMING_SNAKE_CASE ( self: int) -> int: """simple docstring""" __lowerCAmelCase : Optional[int] = "cpu" __lowerCAmelCase : List[str] = self.get_dummy_components() __lowerCAmelCase : Optional[Any] = self.pipeline_class(**_UpperCAmelCase) __lowerCAmelCase : Union[str, Any] = pipe.to(_UpperCAmelCase) pipe.set_progress_bar_config(disable=_UpperCAmelCase) __lowerCAmelCase : str = pipe(**self.get_dummy_inputs(_UpperCAmelCase)) __lowerCAmelCase : str = output.images __lowerCAmelCase : Tuple = pipe( **self.get_dummy_inputs(_UpperCAmelCase) , return_dict=_UpperCAmelCase , )[0] __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] __lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : str = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict: """simple docstring""" __lowerCAmelCase : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") __lowerCAmelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") __lowerCAmelCase : Any = torch.from_numpy(np.array(_UpperCAmelCase)).float() / 255.0 __lowerCAmelCase : Optional[int] = hint.permute(2 , 0 , 1).unsqueeze(0) __lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(_UpperCAmelCase) __lowerCAmelCase : Dict = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , 
torch_dtype=torch.floataa) __lowerCAmelCase : Any = pipeline.to(_UpperCAmelCase) pipeline.set_progress_bar_config(disable=_UpperCAmelCase) __lowerCAmelCase : Dict = "A robot, 4k photo" __lowerCAmelCase : List[str] = torch.Generator(device="cuda").manual_seed(0) __lowerCAmelCase , __lowerCAmelCase : List[Any] = pipe_prior( _UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() __lowerCAmelCase : Any = torch.Generator(device="cuda").manual_seed(0) __lowerCAmelCase : str = pipeline( image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , hint=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) __lowerCAmelCase : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase)
269
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
0
from __future__ import annotations

from math import pi

# Define the reduced Planck constant ℏ (h-bar) and the speed of light c,
# used by the Casimir force formula below.
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ: J * s
SPEED_OF_LIGHT = 3e8  # unit of c: m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir force equation for whichever of the three
    quantities is passed as 0; exactly one argument must be 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
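# A worked example for the function above: two plates of area 4 cm^2 separated
# by 1 micrometre. Plugging into F = (ℏ * c * pi^2 * A) / (240 * d^4) gives
# roughly 5.2e-7 N (the plate geometry here is illustrative):
result = casimir_force(force=0, area=4e-4, distance=1e-6)
print(result)  # {'force': ...} ~ 5.2e-07 newtons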
337
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
346
0
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
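# A usage sketch for the decorator factory defined above; the memoised function
# and the cache size are illustrative. The wrapper caches on the first positional
# argument, so a single-argument function is the natural fit:
@LRUCache.decorator(100)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed in linear time thanks to the cache
print(fib.cache_info())  # hit/miss counters recorded by the underlying LRUCache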
214
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase__ = name.split(""".""" ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): '''simple docstring''' if config_path is not None: UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaConfig() if is_seq_class: UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = idalabel UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) 
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" ) UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
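The conversion above hinges on translating fairseq parameter names into Hugging Face ones via the MAPPING table, where "*" stands for a per-layer index. A self-contained sketch of that renaming step (the one-entry mapping and helper name here are illustrative, not the script's actual API):

# Minimal sketch of wildcard key remapping for checkpoint conversion.
# MAPPING_SUBSET is a toy one-entry table; the real mapping covers every module.
MAPPING_SUBSET = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def remap_key(name):
    for src, dst in MAPPING_SUBSET.items():
        if src in name:
            if "*" in dst:
                # Recover the layer index, e.g.
                # "encoder.layers.3.self_attn.k_proj.weight" -> "3"
                layer_index = name.split(src)[0].split(".")[-2]
                return dst.replace("*", layer_index)
            return dst
    return None

assert remap_key("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"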
346
0
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class _lowercase : '''simple docstring''' def __init__( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=13 , lowerCAmelCase__ :List[Any]=7 , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :int=99 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :str=37 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :Optional[Any]=2 , lowerCAmelCase__ :Optional[int]=0.02 , lowerCAmelCase__ :int=3 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=0 , ) -> str: __SCREAMING_SNAKE_CASE : Any = parent __SCREAMING_SNAKE_CASE : List[Any] = batch_size __SCREAMING_SNAKE_CASE : Dict = seq_length __SCREAMING_SNAKE_CASE : Dict = is_training __SCREAMING_SNAKE_CASE : Tuple = use_input_mask __SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids __SCREAMING_SNAKE_CASE : List[str] = use_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size __SCREAMING_SNAKE_CASE : Tuple = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_act __SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings __SCREAMING_SNAKE_CASE : str = type_vocab_size __SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : Tuple = num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = num_choices __SCREAMING_SNAKE_CASE : Dict = scope __SCREAMING_SNAKE_CASE : Tuple = projection_dim def __magic_name__( self :str ) -> Dict: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py __SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : List[Any] = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : List[Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : 
List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Optional[int] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) __SCREAMING_SNAKE_CASE : Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Tuple = TFDPRContextEncoder(config=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE : List[Any] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE : Tuple = model(_UpperCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Tuple = TFDPRQuestionEncoder(config=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE : Tuple = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE : Dict = model(_UpperCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def __magic_name__( self :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] ) -> List[str]: __SCREAMING_SNAKE_CASE : int = TFDPRReader(config=_UpperCAmelCase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def __magic_name__( self :Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) : List[str] = config_and_inputs __SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class _lowercase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = ( 
( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE__ : List[str] = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {} SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def __magic_name__( self :Optional[Any] ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = TFDPRModelTester(self ) __SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def __magic_name__( self :Optional[int] ) -> int: self.config_tester.run_common_tests() def __magic_name__( self :Any ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*_UpperCAmelCase ) def __magic_name__( self :List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*_UpperCAmelCase ) def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*_UpperCAmelCase ) @slow def __magic_name__( self :Dict ) -> Tuple: for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Dict = TFDPRContextEncoder.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Tuple = TFDPRContextEncoder.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Optional[Any] = TFDPRQuestionEncoder.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Dict = TFDPRReader.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_tf class _lowercase ( unittest.TestCase ): '''simple docstring''' @slow def __magic_name__( self :str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant( [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP] __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_UpperCAmelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. __SCREAMING_SNAKE_CASE : List[str] = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
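The context and question encoders tested above each emit a pooled vector of size `projection_dim or hidden_size`; at retrieval time DPR ranks passages by the inner product of those vectors. A minimal numpy sketch of that scoring step (shapes are illustrative):

import numpy as np

rng = np.random.default_rng(0)
question_emb = rng.standard_normal((1, 768))   # pooled question embedding
passage_embs = rng.standard_normal((3, 768))   # pooled embeddings of 3 candidate passages

scores = question_emb @ passage_embs.T          # (1, 3) dot-product relevance scores
best_passage = int(scores.argmax(axis=1)[0])    # index of the top-ranked passage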
9
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: UpperCAmelCase__ = [] UpperCAmelCase__ = Counter() UpperCAmelCase__ = 0 UpperCAmelCase__ = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: UpperCAmelCase__ = candidate + """\n""" + test_case UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): UpperCAmelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCAmelCase__ , UpperCAmelCase__ = [], [] for result in results.values(): result.sort() UpperCAmelCase__ = [r[1]["""passed"""] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = k UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
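`estimate_pass_at_k` implements the unbiased estimator from the HumanEval paper: pass@k = 1 - C(n-c, k)/C(n, k), evaluated in the numerically stable product form used above. A quick check against the docstring example (n=2 candidates, c=1 correct):

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # 1 - C(n-c, k) / C(n, k), expanded as a product to avoid huge binomials.
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

assert pass_at_k(n=2, c=1, k=1) == 0.5  # matches 'pass@1': 0.5 in the docstring
assert pass_at_k(n=2, c=1, k=2) == 1.0  # matches 'pass@2': 1.0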
346
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A = logging.get_logger(__name__) class UpperCAmelCase (lowerCamelCase_ ): """simple docstring""" _UpperCAmelCase :List[str] = ["""pixel_values"""] def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = 32 , _UpperCAmelCase=PILImageResampling.BILINEAR , _UpperCAmelCase = True , **_UpperCAmelCase , ): lowercase__: Dict = do_resize lowercase__: List[Any] = do_rescale lowercase__: Dict = size_divisor lowercase__: List[str] = resample super().__init__(**_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ): lowercase__, lowercase__: Optional[Any] = get_image_size(_UpperCAmelCase ) # Rounds the height and width down to the closest multiple of size_divisor lowercase__: Any = height // size_divisor * size_divisor lowercase__: Tuple = width // size_divisor * size_divisor lowercase__: str = resize(_UpperCAmelCase , (new_h, new_w) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) return image def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ): return rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ): lowercase__: Tuple = do_resize if do_resize is not None else self.do_resize lowercase__: Tuple = do_rescale if do_rescale is not None else self.do_rescale lowercase__: Union[str, Any] = size_divisor if size_divisor is not None else self.size_divisor lowercase__: Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) lowercase__: Tuple = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. lowercase__: Tuple = [to_numpy_array(_UpperCAmelCase ) for img in images] if do_resize: lowercase__: List[str] = [self.resize(_UpperCAmelCase , size_divisor=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_rescale: lowercase__: str = [self.rescale(_UpperCAmelCase , scale=1 / 255 ) for image in images] lowercase__: Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] lowercase__: List[Any] = {'''pixel_values''': images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
177
import math


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    # Walk upward (or downward with desc=True) until a prime is found.
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    # If the starting value was already prime, return the next one instead.
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
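Usage of the two helpers above (values checked by hand):

print(is_prime(17))             # True
print(next_prime(14))           # 17: counts up from 14 until a prime is hit
print(next_prime(7, factor=2))  # 17: 2 * 7 = 14 is composite, so search upward
print(next_prime(17))           # 19: 17 is already prime, so the *next* prime is returned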
346
0
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap over Node objects, with an index map so decrease_key is O(log n)."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # Sift down every internal node, from the last parent up to the root.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate a min-heap.

# Generating min-heap from an array:
my_min_heap = MinHeap([r, b, a, x, e])

# Generating min-heap by the insert method:
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
198
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times ``term`` occurs in ``document`` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    """Return (number of newline-separated docs containing ``term``, total docs)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
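A worked example with the helpers above (idf values are base-10 logs rounded to 3 places):

corpus = "the cat sat\nthe dog ran\nthe cat ran"

tf = term_frequency("cat", "the cat saw the cat")  # 2 occurrences
df, n = document_frequency("cat", corpus)          # (2, 3): term appears in 2 of 3 docs
idf = inverse_document_frequency(df, n)            # round(log10(3 / 2), 3) == 0.176
print(tf_idf(tf, idf))                             # 0.352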
346
0
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowerCamelCase : str =logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: UpperCamelCase__ : Dict = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : int = downstream_dict["projector.weight"] UpperCamelCase__ : Union[str, Any] = downstream_dict["projector.bias"] UpperCamelCase__ : List[str] = downstream_dict["model.post_net.linear.weight"] UpperCamelCase__ : List[Any] = downstream_dict["model.post_net.linear.bias"] return model def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]: UpperCamelCase__ : Any = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Optional[int] = downstream_dict["model.linear.weight"] UpperCamelCase__ : str = downstream_dict["model.linear.bias"] return model def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: UpperCamelCase__ : List[str] = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : List[str] = downstream_dict["connector.weight"] UpperCamelCase__ : List[str] = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): UpperCamelCase__ : Any = downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] UpperCamelCase__ : Union[str, Any] = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] UpperCamelCase__ : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] UpperCamelCase__ : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] UpperCamelCase__ : str = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] UpperCamelCase__ : int = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] UpperCamelCase__ : int = downstream_dict["objective.W"] return model @torch.no_grad() def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: UpperCamelCase__ : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="cpu" ) UpperCamelCase__ : str = checkpoint["Downstream"] UpperCamelCase__ : Union[str, Any] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained( SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : int = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): UpperCamelCase__ : Any = convert_classification(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif arch.endswith("ForAudioFrameClassification" ): UpperCamelCase__ : str = convert_diarization(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif arch.endswith("ForXVector" ): UpperCamelCase__ : Optional[Any] = convert_xvector(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: UpperCamelCase__ : str = 
checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCamelCase : List[Any] =argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') lowerCamelCase : List[str] =parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
189
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase_ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase_ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase_ = model.state_dict() UpperCAmelCase_ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 UpperCAmelCase_ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase_ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase_ = state_dict[f"cls.predictions.transform.dense.{w}"] UpperCAmelCase_ = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
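The script keeps teacher layers [0, 2, 4, 7, 9, 11] and renumbers them 0-5 for the 6-layer student. The renumbering pattern in isolation (toy state dict; key names are illustrative, not the script's actual keys):

teacher_layers = [0, 2, 4, 7, 9, 11]
teacher_sd = {f"encoder.layer.{i}.weight": f"tensor_{i}" for i in range(12)}  # stand-in tensors

student_sd = {
    f"encoder.layer.{std_idx}.weight": teacher_sd[f"encoder.layer.{t_idx}.weight"]
    for std_idx, t_idx in enumerate(teacher_layers)
}
assert student_sd["encoder.layer.5.weight"] == "tensor_11"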
346
0
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = 0 @slow def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_UpperCAmelCase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_UpperCAmelCase ) , 0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) # Check that tokenizer_type ≠ model_type __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(_UpperCAmelCase , "vocab.txt" ) ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type="bert" , use_fast=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json" , os.path.join(_UpperCAmelCase , "vocab.json" ) ) shutil.copy("./tests/fixtures/merges.txt" , os.path.join(_UpperCAmelCase , "merges.txt" ) ) 
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type="gpt2" , use_fast=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(_UpperCAmelCase , "vocab.txt" ) ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type="bert" ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json" , os.path.join(_UpperCAmelCase , "vocab.json" ) ) shutil.copy("./tests/fixtures/merges.txt" , os.path.join(_UpperCAmelCase , "merges.txt" ) ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type="gpt2" ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" with pytest.raises(_UpperCAmelCase ): AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" ) @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: __lowerCAmelCase = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" ) self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast) ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCAmelCase ) else: self.assertEqual(tokenizer.do_lower_case , _UpperCAmelCase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _UpperCAmelCase , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ): __lowerCAmelCase = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = TOKENIZER_MAPPING.values() __lowerCAmelCase = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_UpperCAmelCase ) @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=_UpperCAmelCase ) , _UpperCAmelCase ) self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , _UpperCAmelCase ) @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=_UpperCAmelCase ) __lowerCAmelCase = "Hello, world. How are you?" 
__lowerCAmelCase = tokenizer.tokenize(_UpperCAmelCase ) self.assertEqual("[UNK]" , tokens[0] ) __lowerCAmelCase = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=_UpperCAmelCase ) __lowerCAmelCase = tokenizer.tokenize(_UpperCAmelCase ) self.assertEqual("[UNK]" , tokens[0] ) @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" ) self.assertEqual(type(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , "[UNK]" ) self.assertEqual(tokenizer.padding_side , "right" ) self.assertEqual(tokenizer.truncation_side , "right" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained("ctrl" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = get_tokenizer_config("bert-base-cased" ) __lowerCAmelCase = config.pop("_commit_hash" , _UpperCAmelCase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_UpperCAmelCase , {"do_lower_case": False} ) # This model does not have a tokenizer_config so we get back an empty dict. __lowerCAmelCase = get_tokenizer_config(_UpperCAmelCase ) self.assertDictEqual(_UpperCAmelCase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = get_tokenizer_config(_UpperCAmelCase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" try: AutoConfig.register("custom" , _UpperCAmelCase ) AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCAmelCase ): AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase ) __lowerCAmelCase = CustomTokenizer.from_pretrained(_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" try: AutoConfig.register("custom" , _UpperCAmelCase ) # Can register in two steps AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCAmelCase ): AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase = BertTokenizerFast.from_pretrained(_UpperCAmelCase ) bert_tokenizer.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = CustomTokenizerFast.from_pretrained(_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" ) # If remote code is disabled, we can't load this config. with self.assertRaises(_UpperCAmelCase ): __lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version __lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_UpperCAmelCase ) __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" ) @require_tokenizers def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" class a__ ( lowerCamelCase_ ): _a : Dict = False class a__ ( lowerCamelCase_ ): _a : int = NewTokenizer _a : Any = False try: AutoConfig.register("custom" , _UpperCAmelCase ) AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase ) AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase ) # If remote code is not set, the default is to use local __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertFalse(tokenizer.special_attribute_present ) __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=_UpperCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
__lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertFalse(tokenizer.special_attribute_present ) __lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub __lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertTrue(tokenizer.special_attribute_present ) __lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=_UpperCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version __lowerCAmelCase = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ): __lowerCAmelCase = AutoTokenizer.from_pretrained("bert-base" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): __lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , revision="aaaaaa" ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: __lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
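The registration pattern these tests exercise, reduced to its minimal form (the two Custom* classes are placeholders; a usable tokenizer must implement the vocab and tokenization methods):

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class CustomConfig(PretrainedConfig):
    model_type = "custom"

class CustomTokenizer(PreTrainedTokenizer):
    pass  # placeholder: real subclasses implement vocab handling

AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# From here on, AutoTokenizer.from_pretrained resolves any checkpoint whose config
# declares model_type == "custom" to CustomTokenizer.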
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
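# A minimal, self-contained sketch of the two-phase sampling loop that full_loop()
# above exercises, assuming diffusers' PNDMScheduler API; the zero "model" below is
# a stand-in for a real noise-prediction network.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
# PNDM warms up with Runge-Kutta (PRK) steps, then switches to linear multistep (PLMS).
for t in scheduler.prk_timesteps:
    residual = torch.zeros_like(sample)  # placeholder noise prediction
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    residual = torch.zeros_like(sample)
    sample = scheduler.step_plms(residual, t, sample).prev_sample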
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
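# A short sketch of how the tubelet geometry above determines the video token count,
# assuming the VivitConfig defaults (a 32-frame, 224x224 clip); arithmetic shown is
# illustrative of the patch-embedding step, not code from the model itself.
from transformers import VivitConfig

config = VivitConfig()
t, h, w = config.tubelet_size  # [2, 16, 16]
num_patches = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
print(num_patches)  # 16 * 14 * 14 = 3136 tubelet tokens (plus one CLS token)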
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
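# A standalone sketch of the VERSION-replacement mechanic used above, run against a
# made-up one-line file content; it demonstrates how the "init" pattern rewrites a
# dev version to a release version.
code = '__version__ = "0.19.0.dev0"\n'
re_pattern, template = REPLACE_PATTERNS["init"]
print(re_pattern.sub(template.replace("VERSION", "0.19.0"), code))  # -> __version__ = "0.19.0"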
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
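# A minimal sketch of the ancestral sampling step the full-loop tests above exercise,
# assuming the UnCLIPScheduler API; the zero residual is a stand-in for a real noise
# prediction, so the output is meaningless but the control flow matches.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # placeholder noise prediction
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample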
"""Tokenization classes."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
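# A small sketch of the jieba + character-remapping trick the tokenizer above relies
# on: text is segmented by jieba, then " " and "\n" are mapped to the block
# characters "\u2582"/"\u2583" so SentencePiece can round-trip them (the _decode
# method above performs the reverse mapping). Requires the third-party jieba package.
import jieba

translator = str.maketrans(" \n", "\u2582\u2583")
text = "今天天气真好\n出去走走"
pretokenized = " ".join(jieba.cut(text, cut_all=False))
print(pretokenized.translate(translator))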
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __snake_case : Dict = logging.get_logger(__name__) __snake_case : List[str] = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class A__ ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = """deberta-v2""" def __init__( self: str , _SCREAMING_SNAKE_CASE: Tuple=12_8100 , _SCREAMING_SNAKE_CASE: str=1536 , _SCREAMING_SNAKE_CASE: List[Any]=24 , _SCREAMING_SNAKE_CASE: List[Any]=24 , _SCREAMING_SNAKE_CASE: List[str]=6144 , _SCREAMING_SNAKE_CASE: Any="gelu" , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: Any=512 , _SCREAMING_SNAKE_CASE: List[Any]=0 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.02 , _SCREAMING_SNAKE_CASE: str=1e-7 , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: Tuple=-1 , _SCREAMING_SNAKE_CASE: str=0 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: str=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=0 , _SCREAMING_SNAKE_CASE: Union[str, Any]="gelu" , **_SCREAMING_SNAKE_CASE: str , ) -> Dict: """simple docstring""" super().__init__(**_UpperCAmelCase) __lowerCAmelCase : List[str] = hidden_size __lowerCAmelCase : Tuple = num_hidden_layers __lowerCAmelCase : Optional[int] = num_attention_heads __lowerCAmelCase : str = intermediate_size __lowerCAmelCase : int = hidden_act __lowerCAmelCase : Optional[Any] = hidden_dropout_prob __lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob __lowerCAmelCase : Optional[Any] = max_position_embeddings __lowerCAmelCase : Dict = type_vocab_size __lowerCAmelCase : Tuple = initializer_range __lowerCAmelCase : Dict = relative_attention __lowerCAmelCase : Dict = max_relative_positions __lowerCAmelCase : str = pad_token_id __lowerCAmelCase : List[str] = position_biased_input # Backwards compatibility if type(_UpperCAmelCase) == str: __lowerCAmelCase : Dict = [x.strip() for x in pos_att_type.lower().split("|")] __lowerCAmelCase : Tuple = pos_att_type __lowerCAmelCase : Union[str, Any] = vocab_size __lowerCAmelCase : int = layer_norm_eps __lowerCAmelCase : List[Any] = kwargs.get("pooler_hidden_size" , _UpperCAmelCase) __lowerCAmelCase : Any = pooler_dropout __lowerCAmelCase : List[str] = pooler_hidden_act class A__ ( lowerCamelCase_ ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Tuple: """simple docstring""" if self.task == "multiple-choice": __lowerCAmelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCAmelCase : Dict = {0: "batch", 1: "sequence"} if self._config.type_vocab_size > 0: return OrderedDict( [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]) else: return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)]) @property def _SCREAMING_SNAKE_CASE ( self: str) -> int: """simple 
docstring""" return 12 def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: Optional["TensorType"] = None , _SCREAMING_SNAKE_CASE: int = 3 , _SCREAMING_SNAKE_CASE: int = 40 , _SCREAMING_SNAKE_CASE: int = 40 , _SCREAMING_SNAKE_CASE: "PreTrainedTokenizerBase" = None , ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Any = super().generate_dummy_inputs(preprocessor=_UpperCAmelCase , framework=_UpperCAmelCase) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input. This allows
    # us to create our new fixed-length samples. The advantage of this method is that we
    # don't lose a whole lot of content from the dataset compared to the case where we
    # simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
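# A usage sketch for the tool above, assuming the transformers agents/tools runtime
# is installed and an image file exists at the (illustrative) path below; the tool
# downloads its default ViLT checkpoint on first call.
from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.jpg")
print(tool(image=image, question="How many dogs are in the picture?"))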
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = "\\n\n"

_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity'

_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n            This includes models such as gpt2, causal variations of bert,\n            causal versions of t5, and more (the full list can be found\n            in the AutoModelForCausalLM documentation here:\n            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def solution(limit: int = 1000000) -> int:
    """
    Compute the sum of Euler's totient phi(n) for 2 <= n <= limit with a prime sieve:
    phi[i] starts at i - 1; whenever i turns out prime (phi[i] still equals i - 1),
    every multiple j of i loses phi[j] // i.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
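# A tiny cross-check of the sieve above against a direct gcd-based totient, for
# small limits; useful for convincing yourself the in-place subtraction is right.
from math import gcd


def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


assert solution(10) == sum(phi_naive(n) for n in range(2, 11))  # both equal 31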
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
9
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase_ ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ): """simple docstring""" super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[Any]=None ): """simple docstring""" UpperCAmelCase__ = {} if top_k is not None: UpperCAmelCase__ = top_k return {}, {}, postprocess_params def __call__( self : Any , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : str ): """simple docstring""" return super().__call__(_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = load_image(_UpperCAmelCase ) UpperCAmelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.model(**_UpperCAmelCase ) return model_outputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str=5 ): """simple docstring""" if top_k > self.model.config.num_labels: UpperCAmelCase__ = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase__ = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase__ , UpperCAmelCase__ = probs.topk(_UpperCAmelCase ) elif self.framework == "tf": UpperCAmelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase__ = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase__ = scores.tolist() UpperCAmelCase__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
346
0
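For context on the pipeline class above: it is usually reached through the high-level `pipeline` factory rather than instantiated directly. A minimal usage sketch (the image URL is illustrative, and the default checkpoint is whatever the factory resolves):

from transformers import pipeline

classifier = pipeline("image-classification")
# top_k caps how many labels come back, mirroring the postprocess step above.
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
for pred in preds:
    print(f"{pred['label']}: {pred['score']:.3f}")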
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
177
"""Count the monotone lattice paths through an n x n grid; the answer is the
central binomial coefficient C(2n, n)."""
from math import factorial


def solution(n: int = 20) -> int:
    # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    n = 2 * n
    k = n // 2
    # Integer division keeps the result exact even for large n.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
346
0
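The closed form above is the central binomial coefficient C(2n, n); a short sketch cross-checking it against the standard library:

from math import comb, factorial


def lattice_paths(n: int) -> int:
    # Monotone paths through an n x n grid: C(2n, n).
    return factorial(2 * n) // (factorial(n) * factorial(n))


assert all(lattice_paths(n) == comb(2 * n, n) for n in range(1, 25))
print(lattice_paths(20))  # 137846528820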
def solution() -> int:
    """Count how many Sundays fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # the first Sunday of January 1901 fell on the 6th
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7  # jump from one Sunday to the next

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1

    return sundays


if __name__ == "__main__":
    print(solution())
198
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : int = MgpstrTokenizer lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Any = False def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = """tester""" UpperCAmelCase__ = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 1 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertNotEqual(len(_UpperCAmelCase ) , 0 ) UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : str ): 
"""simple docstring""" pass
346
0
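The hand-rolled calendar walk above can be verified with the standard library; a small sketch using `datetime` (our helper, not part of the original file):

from datetime import date


def first_of_month_sundays(start_year: int = 1901, end_year: int = 2000) -> int:
    # date.weekday() returns 6 for Sunday.
    return sum(
        1
        for year in range(start_year, end_year + 1)
        for month in range(1, 13)
        if date(year, month, 1).weekday() == 6
    )


print(first_of_month_sundays())  # 171, agreeing with the manual count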
import logging import os import threading import time try: import warnings except ImportError: lowerCamelCase : Optional[int] =None try: import msvcrt except ImportError: lowerCamelCase : int =None try: import fcntl except ImportError: lowerCamelCase : List[Any] =None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowerCamelCase : List[Any] =OSError # Data # ------------------------------------------------ lowerCamelCase : Dict =[ '''Timeout''', '''BaseFileLock''', '''WindowsFileLock''', '''UnixFileLock''', '''SoftFileLock''', '''FileLock''', ] lowerCamelCase : int ='''3.0.12''' lowerCamelCase : Dict =None def SCREAMING_SNAKE_CASE ( ) -> Tuple: global _logger UpperCamelCase__ : List[Any] = _logger or logging.getLogger(__name__ ) return _logger class __a ( lowerCamelCase_ ): def __init__( self : str , SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' UpperCamelCase__ : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): '''simple docstring''' UpperCamelCase__ : Dict = F'The file lock \'{self.lock_file}\' could not be acquired.' return temp class __a : def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' UpperCamelCase__ : Optional[Any] = lock return None def __enter__( self : Optional[int] ): '''simple docstring''' return self.lock def __exit__( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str ): '''simple docstring''' self.lock.release() return None class __a : def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=-1 , SCREAMING_SNAKE_CASE : Any=None ): '''simple docstring''' UpperCamelCase__ : int = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long UpperCamelCase__ : Tuple = self.hash_filename_if_too_long(_UpperCAmelCase , _UpperCAmelCase ) # The path to the lock file. UpperCamelCase__ : Any = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCamelCase__ : int = None # The default timeout value. UpperCamelCase__ : Any = timeout # We use this lock primarily for the lock counter. UpperCamelCase__ : Optional[int] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. UpperCamelCase__ : Union[str, Any] = 0 return None @property def __lowercase ( self : List[Any] ): '''simple docstring''' return self._lock_file @property def __lowercase ( self : Tuple ): '''simple docstring''' return self._timeout @timeout.setter def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : str ): '''simple docstring''' UpperCamelCase__ : str = float(_UpperCAmelCase ) return None def __lowercase ( self : Tuple ): '''simple docstring''' raise NotImplementedError() def __lowercase ( self : Tuple ): '''simple docstring''' raise NotImplementedError() @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return self._lock_file_fd is not None def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : str=0.0_5 ): '''simple docstring''' if timeout is None: UpperCamelCase__ : Optional[Any] = self.timeout # Increment the number right at the beginning. 
# We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 UpperCamelCase__ : str = id(self ) UpperCamelCase__ : List[str] = self._lock_file UpperCamelCase__ : int = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'Attempting to acquire lock {lock_id} on {lock_filename}' ) self._acquire() if self.is_locked: logger().debug(F'Lock {lock_id} acquired on {lock_filename}' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'Timeout on acquiring lock {lock_id} on {lock_filename}' ) raise Timeout(self._lock_file ) else: logger().debug( F'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' ) time.sleep(_UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: UpperCamelCase__ : str = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Dict=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCamelCase__ : str = id(self ) UpperCamelCase__ : Union[str, Any] = self._lock_file logger().debug(F'Attempting to release lock {lock_id} on {lock_filename}' ) self._release() UpperCamelCase__ : Optional[int] = 0 logger().debug(F'Lock {lock_id} released on {lock_filename}' ) return None def __enter__( self : Union[str, Any] ): '''simple docstring''' self.acquire() return self def __exit__( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ): '''simple docstring''' self.release() return None def __del__( self : int ): '''simple docstring''' self.release(force=_UpperCAmelCase ) return None def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' UpperCamelCase__ : Union[str, Any] = os.path.basename(_UpperCAmelCase ) if len(_UpperCAmelCase ) > max_length and max_length > 0: UpperCamelCase__ : Union[str, Any] = os.path.dirname(_UpperCAmelCase ) UpperCamelCase__ : int = str(hash(_UpperCAmelCase ) ) UpperCamelCase__ : Dict = filename[: max_length - len(_UpperCAmelCase ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(_UpperCAmelCase , _UpperCAmelCase ) else: return path class __a ( lowerCamelCase_ ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any]=-1 , SCREAMING_SNAKE_CASE : Dict=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(_UpperCAmelCase , timeout=_UpperCAmelCase , max_filename_length=_UpperCAmelCase ) UpperCamelCase__ : Union[str, Any] = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def __lowercase ( self : str ): '''simple docstring''' UpperCamelCase__ : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCamelCase__ : Any = os.open(self._lock_file , _UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(_UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_UpperCAmelCase ) else: UpperCamelCase__ : Optional[int] = fd return None def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ : int = self._lock_file_fd UpperCamelCase__ : List[str] = None msvcrt.locking(_UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(_UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __a ( lowerCamelCase_ ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=-1 , SCREAMING_SNAKE_CASE : Union[str, Any]=None ): '''simple docstring''' UpperCamelCase__ : str = os.statvfs(os.path.dirname(_UpperCAmelCase ) ).f_namemax super().__init__(_UpperCAmelCase , timeout=_UpperCAmelCase , max_filename_length=_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' UpperCamelCase__ : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCamelCase__ : Tuple = os.open(self._lock_file , _UpperCAmelCase ) try: fcntl.flock(_UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_UpperCAmelCase ) else: UpperCamelCase__ : int = fd return None def __lowercase ( self : Dict ): '''simple docstring''' UpperCamelCase__ : str = self._lock_file_fd UpperCamelCase__ : List[str] = None fcntl.flock(_UpperCAmelCase , fcntl.LOCK_UN ) os.close(_UpperCAmelCase ) return None class __a ( lowerCamelCase_ ): def __lowercase ( self : str ): '''simple docstring''' UpperCamelCase__ : int = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCamelCase__ : List[str] = os.open(self._lock_file , _UpperCAmelCase ) except OSError: pass else: UpperCamelCase__ : List[str] = fd return None def __lowercase ( self : Optional[Any] ): '''simple docstring''' os.close(self._lock_file_fd ) UpperCamelCase__ : Any = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowerCamelCase : List[Any] =None if msvcrt: lowerCamelCase : Optional[int] =WindowsFileLock elif fcntl: lowerCamelCase : Tuple =UnixFileLock else: lowerCamelCase : str =SoftFileLock if warnings is not None: warnings.warn('''only soft file lock is available''')
189
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not completed: if counter == 1: self.reset() UpperCAmelCase__ = self.advance() if not self.does_advance(_UpperCAmelCase ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.update(_UpperCAmelCase ) counter += 1 if counter > 1_00_00: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any]=False ): """simple docstring""" raise NotImplementedError( f'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , _UpperCAmelCase : List[int] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase__ = token_ids UpperCAmelCase__ = len(self.token_ids ) UpperCAmelCase__ = -1 # the index of the currently fulfilled step UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.fulfilled_idx += 1 UpperCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase__ = True UpperCAmelCase__ = completed else: # failed to make progress. 
UpperCAmelCase__ = True self.reset() return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = 0 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : Optional[int]=False ): """simple docstring""" UpperCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.fulfilled_idx UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : List[List[int]] , _UpperCAmelCase : List[str]=True ): """simple docstring""" UpperCAmelCase__ = max([len(_UpperCAmelCase ) for one in nested_token_ids] ) UpperCAmelCase__ = {} for token_ids in nested_token_ids: UpperCAmelCase__ = root for tidx, token_id in enumerate(_UpperCAmelCase ): if token_id not in level: UpperCAmelCase__ = {} UpperCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f''' {nested_token_ids}.''' ) UpperCAmelCase__ = root def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : int ): """simple docstring""" UpperCAmelCase__ = self.trie for current_token in current_seq: UpperCAmelCase__ = start[current_token] UpperCAmelCase__ = list(start.keys() ) return next_tokens def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.next_tokens(_UpperCAmelCase ) return len(_UpperCAmelCase ) == 0 def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = list(root.values() ) if len(_UpperCAmelCase ) == 0: return 1 else: return sum([self.count_leaves(_UpperCAmelCase ) for nn in next_nodes] ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): """simple docstring""" UpperCAmelCase__ = self.count_leaves(_UpperCAmelCase ) return len(_UpperCAmelCase ) != leaf_count class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[List[int]] ): """simple docstring""" super(_UpperCAmelCase , self ).__init__() if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) for token_ids in nested_token_ids ): raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase__ = DisjunctiveTrie(_UpperCAmelCase ) UpperCAmelCase__ = nested_token_ids UpperCAmelCase__ = self.trie.max_height UpperCAmelCase__ = [] UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : int ): """simple docstring""" if not 
isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int ): """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_UpperCAmelCase )}''' ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False if self.does_advance(_UpperCAmelCase ): self.current_seq.append(_UpperCAmelCase ) UpperCAmelCase__ = True else: UpperCAmelCase__ = True self.reset() UpperCAmelCase__ = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase__ = completed return stepped, completed, reset def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = False UpperCAmelCase__ = [] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Dict=False ): """simple docstring""" UpperCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase__ = self.seqlen UpperCAmelCase__ = self.current_seq UpperCAmelCase__ = self.completed return new_constraint class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : List[Constraint] ): """simple docstring""" UpperCAmelCase__ = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase__ = max([c.seqlen for c in constraints] ) UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = False self.init_state() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = [] UpperCAmelCase__ = None UpperCAmelCase__ = [constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.constraints] def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase__ = constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) else: UpperCAmelCase__ = self.inprogress_constraint.advance() if isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.append(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): token_list.extend(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 0: return None else: return token_list def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase__ , UpperCAmelCase__ = self.add(_UpperCAmelCase ) # the entire list of constraints are fulfilled if self.completed: break def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int ): """simple 
docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase__ , UpperCAmelCase__ = False, False if self.completed: UpperCAmelCase__ = True UpperCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.inprogress_constraint.update(_UpperCAmelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) ) UpperCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_UpperCAmelCase ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = pending_constraint.update(_UpperCAmelCase ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(_UpperCAmelCase ) UpperCAmelCase__ = None if not complete and stepped: UpperCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any]=True ): """simple docstring""" UpperCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: UpperCAmelCase__ = [ constraint.copy(stateful=_UpperCAmelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase__ = self.inprogress_constraint.copy(stateful=_UpperCAmelCase ) UpperCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
346
0
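The lock classes defined above are normally used through the context-manager protocol they implement; a minimal sketch, assuming the module is importable as `filelock` (the pip package of the same name exposes the same interface):

from filelock import FileLock, Timeout

lock = FileLock("shared_resource.txt.lock", timeout=1)
try:
    with lock:  # acquired on __enter__, released on __exit__
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("Another process is holding the lock.")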
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    # The parameter names double as API query fields, since locals() is forwarded.
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
92
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""" ) @require_torch @require_tf @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Path , _UpperCAmelCase : Union[str, None] = None , _UpperCAmelCase : Union[List[str], None] = None , _UpperCAmelCase : Union[str, List[str], None] = None , _UpperCAmelCase : bool = True , ): """simple docstring""" UpperCAmelCase__ = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )] if identifier is not None: UpperCAmelCase__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for n_ in n_identifier: UpperCAmelCase__ = [file for file in files if n_ not in file] else: UpperCAmelCase__ = [file for file in files if n_identifier not in file] UpperCAmelCase__ = ignore_files or [] ignore_files.append("""__init__.py""" ) UpperCAmelCase__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , _UpperCAmelCase ) if only_modules: UpperCAmelCase__ = file.split(""".""" )[0] try: UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = doctest.DocTestSuite(_UpperCAmelCase ) UpperCAmelCase__ = unittest.TextTestRunner().run(_UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: UpperCAmelCase__ = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """modeling""" UpperCAmelCase__ = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase , ignore_files=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """tokenization""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = """configuration""" self.analyze_directory(_UpperCAmelCase , identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = Path("""src/transformers""" ) UpperCAmelCase__ = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(_UpperCAmelCase , n_identifier=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = Path("""docs/source""" ) UpperCAmelCase__ = ["""favicon.ico"""] self.analyze_directory(_UpperCAmelCase , ignore_files=_UpperCAmelCase , only_modules=_UpperCAmelCase )
346
0
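Because the weather helpers above forward `locals()` as query parameters, the keyword names double as OpenWeatherMap API fields. A usage sketch (the key is a placeholder you must replace):

API_KEY = "your-openweathermap-appid"  # hypothetical key

weather = current_weather("London", appid=API_KEY)
if weather.get("cod") == 200:
    print(weather["main"]["temp"], weather["weather"][0]["description"])
else:
    print("Request failed:", weather.get("message"))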
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
187
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
346
0
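A short sketch of how the `attribute_map` in the Swin config above aliases generic names onto backbone-specific ones (assuming a `transformers` install with MaskFormer support):

from transformers import MaskFormerSwinConfig

config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(config.num_hidden_layers == config.num_layers)  # True, via attribute_map
print(config.hidden_size)  # 768 == embed_dim * 2 ** (len(depths) - 1)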
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR is a character-level tokenizer.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
104
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    # Two-pointer scan from both ends.
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
346
0
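The benchmark ordering in the palindrome module above (slicing fastest, explicit traversal slowest) reflects where the looping happens; a tiny self-contained comparison on one long input:

from timeit import timeit

s = "ab" * 5000 + "ba" * 5000  # a 20,000-character palindrome
print(timeit(lambda: s == s[::-1], number=1000))  # one C-level pass
print(timeit(lambda: all(s[i] == s[len(s) - i - 1] for i in range(len(s) // 2)), number=1000))  # Python-level loop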
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = "▁" UpperCamelCase_ = {"vocab_file": "sentencepiece.bpe.model"} UpperCamelCase_ = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } UpperCamelCase_ = { "facebook/xglm-564M": 2_0_4_8, } class _a ( lowerCamelCase_ ): '''simple docstring''' A : Optional[int] = VOCAB_FILES_NAMES A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Dict = ["""input_ids""", """attention_mask"""] def __init__( self, A, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer SCREAMING_SNAKE_CASE : int = 7 SCREAMING_SNAKE_CASE : Union[str, Any] = [F"<madeupword{i}>" for i in range(self.num_madeup_words )] SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.get('additional_special_tokens', [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, ) SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE : int = 1 # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} SCREAMING_SNAKE_CASE : Tuple = len(self.sp_model ) SCREAMING_SNAKE_CASE : str = {F"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): SCREAMING_SNAKE_CASE : Dict = {} SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCamelCase_ ( self, A, A = None, A = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCamelCase_ ( self ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.PieceToId(_UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase_ ( self, A ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase, ' ' ).strip() return out_string def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' if not os.path.isdir(_UpperCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE : str = os.path.join( _UpperCAmelCase, (filename_prefix + '-' if filename_prefix else 
'') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase, 'wb' ) as fi: SCREAMING_SNAKE_CASE : str = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,)
251
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ): """simple docstring""" UpperCAmelCase__ = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
346
0
"""simple docstring""" from PIL import Image def _lowercase ( __snake_case ,__snake_case ) -> int: __lowerCAmelCase : List[Any] = (259 * (level + 255)) / (255 * (259 - level)) def contrast(__snake_case ) -> int: return int(128 + factor * (c - 128) ) return img.point(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change contrast to 170 __snake_case : str = change_contrast(img, 170) cont_img.save('image_data/lena_high_contrast.png', format='png')
269
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
346
0
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
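As a design note, list.pop(0) in the traversal above is O(n) per pop; a collections.deque gives O(1) popleft with an otherwise identical traversal. A minimal sketch reusing the module-level graph (bfs_parents is a made-up name):

from collections import deque

def bfs_parents(adj: dict, source: str) -> dict:
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbour in adj[vertex]:
            if neighbour not in parent:
                parent[neighbour] = vertex
                queue.append(neighbour)
    return parent

print(bfs_parents(graph, "G"))  # {'G': None, 'C': 'G', 'A': 'C', 'F': 'C', 'B': 'A', 'E': 'A', 'D': 'B'}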
337
'''simple docstring''' import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : bool = False , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = False UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) UpperCAmelCase__ = TaConfig( vocab_size=_UpperCAmelCase , d_model=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_kv=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , feed_forward_proj=_UpperCAmelCase , is_decoder=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , ) UpperCAmelCase__ = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): UpperCAmelCase__ = TaBlock(_UpperCAmelCase ) self.encoders.append(_UpperCAmelCase ) UpperCAmelCase__ = TaLayerNorm(_UpperCAmelCase ) UpperCAmelCase__ = nn.Dropout(p=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str ): """simple docstring""" UpperCAmelCase__ = self.token_embedder(_UpperCAmelCase ) UpperCAmelCase__ = encoder_input_tokens.shape[1] UpperCAmelCase__ = torch.arange(_UpperCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(_UpperCAmelCase ) UpperCAmelCase__ = self.dropout_pre(_UpperCAmelCase ) # inverted the attention mask UpperCAmelCase__ = encoder_input_tokens.size() UpperCAmelCase__ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase ) for lyr in self.encoders: UpperCAmelCase__ = lyr(_UpperCAmelCase , _UpperCAmelCase )[0] UpperCAmelCase__ = self.layer_norm(_UpperCAmelCase ) return self.dropout_post(_UpperCAmelCase ), encoder_inputs_mask
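A hedged illustration of what get_extended_attention_mask (inherited from ModuleUtilsMixin) feeds the T5 blocks above: the 2-D padding mask becomes a broadcastable additive mask with a large negative value at padded positions. The shapes below are a toy example, not taken from the model:

import torch

mask = torch.tensor([[1, 1, 0]], dtype=torch.float32)          # [batch, seq_len]
extended = mask[:, None, None, :]                              # [batch, 1, 1, seq_len]
extended = (1.0 - extended) * torch.finfo(torch.float32).min   # ~ -inf where padded
print(extended.shape)  # torch.Size([1, 1, 1, 3])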
346
0
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer snake_case_ = ['''gpt2'''] snake_case_ = '''gpt2''' if is_tf_available(): class SCREAMING_SNAKE_CASE__ (tf.Module ): def __init__( self , a): super().__init__() lowercase__ : List[Any] = tokenizer lowercase__ : List[Any] = AutoConfig.from_pretrained(_UpperCAmelCase) lowercase__ : List[Any] = TFGPTaLMHeadModel.from_config(_UpperCAmelCase) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text'),)) def snake_case_ ( self , a): lowercase__ : str = self.tokenizer(_UpperCAmelCase) lowercase__ : Any = tokenized['input_ids'].to_tensor() lowercase__ : Any = tf.cast(input_ids_dense > 0 , tf.intaa) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) lowercase__ : Any = self.model(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase)['logits'] return outputs @require_tf @require_keras_nlp class SCREAMING_SNAKE_CASE__ (unittest.TestCase ): def snake_case_ ( self): super().setUp() lowercase__ : Dict = [GPTaTokenizer.from_pretrained(_UpperCAmelCase) for checkpoint in (TOKENIZER_CHECKPOINTS)] lowercase__ : Dict = [TFGPTaTokenizer.from_pretrained(_UpperCAmelCase) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers) == len(self.tf_tokenizers) lowercase__ : List[Any] = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] lowercase__ : str = list(zip(self.test_sentences , self.test_sentences[::-1])) def snake_case_ ( self): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in self.test_sentences: lowercase__ : List[Any] = tokenizer([test_inputs] , return_tensors='tf') lowercase__ : Any = tf_tokenizer([test_inputs]) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors lowercase__ : Optional[int] = python_outputs[key].numpy() lowercase__ : Tuple = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape)) self.assertTrue(tf.reduce_all(tf.cast(_UpperCAmelCase , tf.intaa) == tf_outputs_values)) @slow def snake_case_ ( self): for tf_tokenizer in self.tf_tokenizers: lowercase__ : List[str] = tf.function(_UpperCAmelCase) for test_inputs in self.test_sentences: lowercase__ : str = tf.constant(_UpperCAmelCase) lowercase__ : Tuple = compiled_tokenizer(_UpperCAmelCase) lowercase__ : Optional[Any] = tf_tokenizer(_UpperCAmelCase) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def snake_case_ ( self): for tf_tokenizer in self.tf_tokenizers: lowercase__ : int = ModelToSave(tokenizer=_UpperCAmelCase) lowercase__ : Tuple = tf.convert_to_tensor([self.test_sentences[0]]) lowercase__ : List[str] = model.serving(_UpperCAmelCase) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowercase__ : Optional[int] = 
Path(_UpperCAmelCase) / 'saved.model' tf.saved_model.save(_UpperCAmelCase , _UpperCAmelCase , signatures={'serving_default': model.serving}) lowercase__ : List[Any] = tf.saved_model.load(_UpperCAmelCase) lowercase__ : Optional[Any] = loaded_model.signatures['serving_default'](_UpperCAmelCase)['output_0'] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output)) @slow def snake_case_ ( self): for tf_tokenizer in self.tf_tokenizers: lowercase__ : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]]) lowercase__ : Optional[Any] = tf_tokenizer(_UpperCAmelCase) # Build model with some sample inputs lowercase__ : str = tf_tokenizer.get_config() lowercase__ : Dict = TFGPTaTokenizer.from_config(_UpperCAmelCase) lowercase__ : Dict = model_from_config(_UpperCAmelCase) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key])) @slow def snake_case_ ( self): for tf_tokenizer in self.tf_tokenizers: # for the test to run lowercase__ : Tuple = 12_3123 for max_length in [3, 5, 1024]: lowercase__ : Tuple = tf.convert_to_tensor([self.test_sentences[0]]) lowercase__ : Tuple = tf_tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase) lowercase__ : Dict = out['input_ids'].numpy().shape[1] assert out_length == max_length
214
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' UpperCAmelCase__ = {} with open(SCREAMING_SNAKE_CASE__ , """r""" ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = line.strip() if line: UpperCAmelCase__ = line.split() UpperCAmelCase__ = line_number UpperCAmelCase__ = words[0] UpperCAmelCase__ = value return result def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = hf_pointer for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = shape_pointer.shape # let's reduce dimension UpperCAmelCase__ = value[0] else: UpperCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase__ = value elif weight_type == "weight_g": UpperCAmelCase__ = value elif weight_type == "weight_v": UpperCAmelCase__ = value elif weight_type == "bias": UpperCAmelCase__ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = value else: UpperCAmelCase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = PARAM_MAPPING[full_name.split(""".""" )[-1]] UpperCAmelCase__ = """param""" if weight_type is not None and weight_type != "param": UpperCAmelCase__ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase__ = """.""".join([key, hf_param_name] ) else: UpperCAmelCase__ = key UpperCAmelCase__ = value if """lm_head""" in full_key else value[0] UpperCAmelCase_ = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): '''simple docstring''' UpperCAmelCase__ = False for key, mapped_key in MAPPING.items(): UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase__ = True if "*" in mapped_key: UpperCAmelCase__ = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] UpperCAmelCase__ = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase__ = """weight_g""" elif "weight_v" in name: UpperCAmelCase__ = """weight_v""" elif "bias" in name: UpperCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ = """weight""" else: UpperCAmelCase__ = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = [] UpperCAmelCase__ = fairseq_model.state_dict() UpperCAmelCase__ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase__ = True else: UpperCAmelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' UpperCAmelCase__ = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase__ = name.split(""".""" ) UpperCAmelCase__ = int(items[0] ) UpperCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): '''simple docstring''' if config_path is not None: UpperCAmelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaConfig() if is_seq_class: UpperCAmelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = idalabel UpperCAmelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ = target_dict.pad_index UpperCAmelCase__ = target_dict.bos_index UpperCAmelCase__ = target_dict.eos_index UpperCAmelCase__ = len(target_dict.symbols ) UpperCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) 
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False UpperCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase__ = argparse.Namespace(task="""audio_pretraining""" ) UpperCAmelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
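An example invocation of the conversion script defined above, written as a comment sketch; the script filename and all paths are hypothetical, while the flags match the argparse definitions:

# python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./wav2vec_small.pt \
#     --pytorch_dump_folder_path ./wav2vec2-base-converted \
#     --dict_path ./dict.ltr.txt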
346
0
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __lowerCAmelCase : Optional[int] ='bart' __lowerCAmelCase : str =True @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( ): if LOAD_DENSE_INDEX: __SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = qar_model.eval() else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = (None, None) if MODEL_TYPE == "bart": __SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __SCREAMING_SNAKE_CASE : Dict = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __SCREAMING_SNAKE_CASE : Tuple = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __SCREAMING_SNAKE_CASE : Dict = sas_model.eval() else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( ): if LOAD_DENSE_INDEX: __SCREAMING_SNAKE_CASE : Union[str, Any] = faiss.StandardGpuResources() __SCREAMING_SNAKE_CASE : List[Any] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __SCREAMING_SNAKE_CASE : str = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __SCREAMING_SNAKE_CASE : Optional[int] = faiss.IndexFlatIP(128 ) __SCREAMING_SNAKE_CASE : str = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ ) wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE__ ) # TODO fix for larger GPU else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = (None, None) __SCREAMING_SNAKE_CASE : Union[str, Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : List[Any] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __SCREAMING_SNAKE_CASE : Any = elia['''train_eli5'''] __SCREAMING_SNAKE_CASE : int = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __SCREAMING_SNAKE_CASE : Tuple = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(SCREAMING_SNAKE_CASE__ ) return (elia_train, eli5_train_q_index) __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] =load_indexes() __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] =load_models() __lowerCAmelCase ,__lowerCAmelCase : List[Any] =load_train_data() def _UpperCamelCase ( lowercase__ , lowercase__=10 ): __SCREAMING_SNAKE_CASE : int = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : 
Any = eli5_train_q_index.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE : Any = [elia_train[int(SCREAMING_SNAKE_CASE__ )] for i in I[0]] return nn_examples def _UpperCamelCase ( lowercase__ , lowercase__="wiki40b" , lowercase__="dense" , lowercase__=10 ): if source == "none": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = query_qa_dense_index( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = query_es_index( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index_name='''english_wiki40b_snippets_100w''' , n_results=SCREAMING_SNAKE_CASE__ , ) __SCREAMING_SNAKE_CASE : List[Any] = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __SCREAMING_SNAKE_CASE : Tuple = '''question: {} context: {}'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda lowercase__ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowercase__ : None), } ) def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__=64 , lowercase__=256 , lowercase__=False , lowercase__=2 , lowercase__=0.95 , lowercase__=0.8 ): with torch.no_grad(): __SCREAMING_SNAKE_CASE : int = qa_sas_generate( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE__ , min_len=SCREAMING_SNAKE_CASE__ , max_len=SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ , temp=SCREAMING_SNAKE_CASE__ , top_p=SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar __lowerCAmelCase : Union[str, Any] ='<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' __lowerCAmelCase : List[str] ='\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __lowerCAmelCase : Dict ='\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) __lowerCAmelCase : List[str] =[ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] __lowerCAmelCase : Union[str, Any] =st.sidebar.checkbox('Demo options') if demo_options: __lowerCAmelCase : Union[str, Any] =st.sidebar.selectbox( '', action_list, index=3, ) __lowerCAmelCase : Any =action_list.index(action_st) __lowerCAmelCase : Optional[int] =st.sidebar.selectbox( '', ['Show full text of 
passages', 'Show passage section titles'], index=0, ) __lowerCAmelCase : Dict =show_type == 'Show full text of passages' else: __lowerCAmelCase : List[str] =3 __lowerCAmelCase : Tuple =True __lowerCAmelCase : Optional[int] =st.sidebar.checkbox('Retrieval options') if retrieval_options: __lowerCAmelCase : List[Any] ='\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) __lowerCAmelCase : List[str] =st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) __lowerCAmelCase : int =st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: __lowerCAmelCase : Union[str, Any] ='wiki40b' __lowerCAmelCase : Tuple ='dense' __lowerCAmelCase : Union[str, Any] ='beam' __lowerCAmelCase : Optional[Any] =2 __lowerCAmelCase : Optional[Any] =6_4 __lowerCAmelCase : Optional[int] =2_5_6 __lowerCAmelCase : int =None __lowerCAmelCase : Any =None __lowerCAmelCase : int =st.sidebar.checkbox('Generation options') if generate_options: __lowerCAmelCase : List[Any] ='\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) __lowerCAmelCase : Optional[int] =st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) __lowerCAmelCase : Union[str, Any] =st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None ) __lowerCAmelCase : Dict =st.sidebar.slider( 'Maximum generation length', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None ) if sampled == "beam": __lowerCAmelCase : str =st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __lowerCAmelCase : Dict =st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) __lowerCAmelCase : Optional[Any] =st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) __lowerCAmelCase : str =None # start main text __lowerCAmelCase : Dict =[ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? 
Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] __lowerCAmelCase : Union[str, Any] =st.selectbox( 'What would you like to ask? ---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": __lowerCAmelCase : Optional[Any] =st.text_input('Enter your question here:', '') else: __lowerCAmelCase : int =question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": __lowerCAmelCase ,__lowerCAmelCase : List[Any] =make_support(question, source=wiki_source, method='dense', n_results=1_0) __lowerCAmelCase ,__lowerCAmelCase : Tuple =make_support(question, source=wiki_source, method='sparse', n_results=1_0) __lowerCAmelCase : Dict =[] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __lowerCAmelCase : Union[str, Any] =support_list[:1_0] __lowerCAmelCase : str ='<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: __lowerCAmelCase ,__lowerCAmelCase : str =make_support(question, source=wiki_source, method=index_type, n_results=1_0) if action in [0, 3]: __lowerCAmelCase ,__lowerCAmelCase : Dict =answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): __lowerCAmelCase : Dict ='https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) __lowerCAmelCase : Union[str, Any] =res[1].strip() if sec_titles == "": __lowerCAmelCase : Optional[int] ='[{}]({})'.format(res[0], wiki_url) else: __lowerCAmelCase : List[Any] =sec_titles.split(' & ') __lowerCAmelCase : Optional[Any] =' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: __lowerCAmelCase : Tuple =find_nearest_training(question) __lowerCAmelCase : str =nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) __lowerCAmelCase : Optional[Any] =[ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) __lowerCAmelCase : Optional[int] ='\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
9
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor: UpperCAmelCase__ = [] UpperCAmelCase__ = Counter() UpperCAmelCase__ = 0 UpperCAmelCase__ = defaultdict(_UpperCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ): for candidate in candidates: UpperCAmelCase__ = candidate + """\n""" + test_case UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase ) futures.append(_UpperCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_UpperCAmelCase ): UpperCAmelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCAmelCase__ , UpperCAmelCase__ = [], [] for result in results.values(): result.sort() UpperCAmelCase__ = [r[1]["""passed"""] for r in result] total.append(len(_UpperCAmelCase ) ) correct.append(sum(_UpperCAmelCase ) ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = np.array(_UpperCAmelCase ) UpperCAmelCase__ = k UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
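A quick numeric sanity check (hypothetical values) of the unbiased pass@k estimator defined above: with n samples of which c pass, the running-product form equals the closed form 1 - C(n-c, k) / C(n, k), and pass@1 reduces to c / n:

import numpy as np
from math import comb

n, c, k = 5, 2, 2
closed_form = 1.0 - comb(n - c, k) / comb(n, k)                      # 1 - C(3,2)/C(5,2) = 0.7
product_form = 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))  # same product as in estimator
assert abs(closed_form - product_form) < 1e-12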
346
0
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]: if "cls_token" in name: lowercase__: Any = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' ) if "mask_token" in name: lowercase__: Dict = name.replace('''mask_token''' , '''decoder.mask_token''' ) if "decoder_pos_embed" in name: lowercase__: Dict = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: lowercase__: str = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowercase__: Optional[Any] = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowercase__: Union[str, Any] = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' ) if "decoder_blocks" in name: lowercase__: List[str] = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: lowercase__: List[Any] = name.replace('''blocks''' , '''vit.encoder.layer''' ) if "attn.proj" in name: lowercase__: Dict = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowercase__: Optional[Any] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowercase__: Tuple = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowercase__: Any = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowercase__: Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowercase__: str = name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: lowercase__: Optional[Any] = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: lowercase__: Any = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: lowercase__: Dict = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name: lowercase__: Optional[Any] = name.replace('''norm.weight''' , '''vit.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name: lowercase__: Optional[int] = name.replace('''norm.bias''' , '''vit.layernorm.bias''' ) return name def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): lowercase__: Any = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "qkv" in key: lowercase__: Tuple = key.split('''.''' ) lowercase__: str = int(key_split[1] ) if "decoder_blocks" in key: lowercase__: Optional[int] = config.decoder_hidden_size lowercase__: List[str] = '''decoder.decoder_layers.''' if "weight" in key: lowercase__: Optional[int] = val[:dim, :] lowercase__: str = val[dim : dim * 2, :] lowercase__: int = val[-dim:, :] elif "bias" in key: lowercase__: List[str] = val[:dim] lowercase__: Tuple = val[dim : dim * 2] lowercase__: Dict = val[-dim:] else: lowercase__: int = config.hidden_size lowercase__: Optional[int] = '''vit.encoder.layer.''' if "weight" in key: lowercase__: str = val[:dim, :] lowercase__: Dict = val[dim : dim * 2, :] lowercase__: Optional[int] = val[-dim:, :] elif "bias" in key: lowercase__: str = val[:dim] lowercase__: Union[str, Any] = val[dim : dim * 2] lowercase__: str = val[-dim:] else: lowercase__: Any = val return orig_state_dict def 
SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: lowercase__: Optional[Any] = ViTMAEConfig() if "large" in checkpoint_url: lowercase__: List[Any] = 1_0_2_4 lowercase__: int = 4_0_9_6 lowercase__: Any = 2_4 lowercase__: Dict = 1_6 elif "huge" in checkpoint_url: lowercase__: Optional[int] = 1_4 lowercase__: int = 1_2_8_0 lowercase__: Optional[int] = 5_1_2_0 lowercase__: Optional[int] = 3_2 lowercase__: Union[str, Any] = 1_6 lowercase__: Any = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ ) lowercase__: Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )['''model'''] lowercase__: Any = ViTMAEImageProcessor(size=config.image_size ) lowercase__: Tuple = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase__: str = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg''' lowercase__: Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) lowercase__: Dict = ViTMAEImageProcessor(size=config.image_size ) lowercase__: Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) lowercase__: List[str] = model(**SCREAMING_SNAKE_CASE__ ) lowercase__: int = outputs.logits if "large" in checkpoint_url: lowercase__: List[Any] = torch.tensor( [[-0.7_3_0_9, -0.7_1_2_8, -1.0_1_6_9], [-1.0_1_6_1, -0.9_0_5_8, -1.1_8_7_8], [-1.0_4_7_8, -0.9_4_1_1, -1.1_9_1_1]] ) elif "huge" in checkpoint_url: lowercase__: Optional[int] = torch.tensor( [[-1.1_5_9_9, -0.9_1_9_9, -1.2_2_2_1], [-1.1_9_5_2, -0.9_2_6_9, -1.2_3_0_7], [-1.2_1_4_3, -0.9_3_3_7, -1.2_2_6_2]] ) else: lowercase__: Tuple = torch.tensor( [[-0.9_1_9_2, -0.8_4_8_1, -1.1_2_5_9], [-1.1_3_4_9, -1.0_0_3_4, -1.2_5_9_9], [-1.1_7_5_7, -1.0_4_2_9, -1.2_7_2_6]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth", type=str, help="URL of the checkpoint you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __A = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
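A toy check (arbitrary values, dim = 4) of the qkv handling in the state-dict conversion above: a fused [3*dim, dim] projection is sliced into query/key/value thirds, so concatenating the slices back recovers the original tensor:

import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), qkv)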
177
'''simple docstring'''

import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value: int, factor: int = 1, **kwargs) -> int:
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
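A usage sketch for the helpers above (the __main__ block is added for illustration); note that a prime input to next_prime is advanced past itself by the recursive call:

if __name__ == "__main__":
    print(is_prime(17))    # True
    print(next_prime(14))  # 17, the smallest prime >= 14
    print(next_prime(13))  # also 17: the prime input triggers next_prime(14)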
346
0
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer __a: Dict = ["""bert-base-uncased""", """bert-base-cased"""] __a: int = """hf-internal-testing/tiny-bert-tf-only""" if is_tf_available(): class UpperCAmelCase ( tf.keras.Model ): '''simple docstring''' def __init__( self , __lowerCAmelCase ) -> int: super().__init__() lowercase__ : List[Any] = tokenizer lowercase__ : Optional[int] = AutoConfig.from_pretrained(_UpperCAmelCase ) lowercase__ : str = TFAutoModel.from_config(_UpperCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase ) -> str: lowercase__ : str = self.tokenizer(_UpperCAmelCase ) lowercase__ : Any = self.bert(**_UpperCAmelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase( self ) -> Dict: super().setUp() lowercase__ : List[Any] = [ BertTokenizer.from_pretrained(_UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false lowercase__ : Union[str, Any] = [TFBertTokenizer.from_pretrained(_UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(_UpperCAmelCase , use_fast_bert_tokenizer=_UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowercase__ : int = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] lowercase__ : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def _lowerCAmelCase( self ) -> Optional[Any]: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): lowercase__ : Dict = tokenizer(_UpperCAmelCase , return_tensors='''tf''' , padding='''longest''' ) lowercase__ : Any = tf_tokenizer(_UpperCAmelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def _lowerCAmelCase( self ) -> Optional[Any]: for tf_tokenizer in self.tf_tokenizers: lowercase__ : str = tf_tokenizer(self.paired_sentences ) lowercase__ : Dict = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def _lowerCAmelCase( self ) -> Optional[Any]: for tf_tokenizer in self.tf_tokenizers: lowercase__ : Dict = tf.function(_UpperCAmelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): lowercase__ : List[str] = tf.constant(_UpperCAmelCase ) lowercase__ : str = 
compiled_tokenizer(_UpperCAmelCase ) lowercase__ : List[str] = tf_tokenizer(_UpperCAmelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _lowerCAmelCase( self ) -> Any: for tf_tokenizer in self.tf_tokenizers: lowercase__ : int = ModelToSave(tokenizer=_UpperCAmelCase ) lowercase__ : Optional[int] = tf.convert_to_tensor(self.test_sentences ) lowercase__ : List[Any] = model(_UpperCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowercase__ : int = Path(_UpperCAmelCase ) / '''saved.model''' model.save(_UpperCAmelCase ) lowercase__ : Tuple = tf.keras.models.load_model(_UpperCAmelCase ) lowercase__ : Dict = loaded_model(_UpperCAmelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
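A minimal standalone sketch of the pattern these tests exercise: the in-graph TFBertTokenizer should reproduce the Python tokenizer's ids and survive tf.function tracing. The checkpoint name is only an example (the test uses the TOKENIZER_CHECKPOINTS list); treat this as a sketch, not part of the original test file.

import tensorflow as tf
from transformers import BertTokenizer
from transformers.models.bert import TFBertTokenizer

checkpoint = "bert-base-uncased"  # example checkpoint, as in TOKENIZER_CHECKPOINTS above
py_tok = BertTokenizer.from_pretrained(checkpoint)
tf_tok = TFBertTokenizer.from_pretrained(checkpoint)

sentences = ["This is a straightforward English test sentence."]
py_out = py_tok(sentences, return_tensors="tf", padding="longest")
tf_out = tf_tok(tf.constant(sentences))
assert bool(tf.reduce_all(tf.cast(py_out["input_ids"], tf.int64) == tf_out["input_ids"]))

compiled = tf.function(tf_tok)  # the tokenizer also runs inside a traced graph
_ = compiled(tf.constant(sentences))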
198
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
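A worked example for the helpers above, assuming the reconstructed names; the corpus treats each newline-separated line as one document:

# Worked tf-idf example for the functions above.
corpus = "the cat sat\nthe dog ran\na cat and a cat"

tf = term_frequency("cat", "a cat and a cat")   # 2
df, n = document_frequency("cat", corpus)       # (2, 3)
idf = inverse_document_frequency(df, n)         # round(log10(3 / 2), 3) == 0.176
print(tf_idf(tf, idf))                          # 0.352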
346
0
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase : Optional[Any] =logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1_6000 ) -> int: UpperCamelCase__ : Optional[Any] = int(round(sample_rate * max_length ) ) if len(SCREAMING_SNAKE_CASE__ ) <= sample_length: return wav UpperCamelCase__ : Optional[Any] = randint(0 , len(SCREAMING_SNAKE_CASE__ ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class __a : _lowerCAmelCase : Optional[str] = field(default=lowerCamelCase_ , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''A file containing the training audio paths and labels.'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} ) _lowerCAmelCase : str = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) _lowerCAmelCase : str = field( default='''validation''' , metadata={ '''help''': ( '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) _lowerCAmelCase : str = field( default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , ) _lowerCAmelCase : str = field( default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. 
Defaults to \'label\''''} ) _lowerCAmelCase : Optional[int] = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) _lowerCAmelCase : Optional[int] = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) _lowerCAmelCase : float = field( default=2_0 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , ) @dataclass class __a : _lowerCAmelCase : str = field( default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} ) _lowerCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) _lowerCAmelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) _lowerCAmelCase : Optional[bool] = field( default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) _lowerCAmelCase : bool = field( default=lowerCamelCase_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def __lowercase ( self : str ): '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( "The argument `--freeze_feature_extractor` is deprecated and " "will be removed in a future version. Use `--freeze_feature_encoder`" "instead. Setting `freeze_feature_encoder==True`." , _UpperCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( "The argument `--freeze_feature_extractor` is deprecated and " "should not be used in combination with `--freeze_feature_encoder`." "Only make use of `--freeze_feature_encoder`." ) def SCREAMING_SNAKE_CASE ( ) -> List[Any]: UpperCamelCase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_audio_classification" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase__ : str = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. UpperCamelCase__ : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ : Union[str, Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' "Use --overwrite_output_dir to train from scratch." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset and prepare it for the audio classification task. UpperCamelCase__ : Dict = DatasetDict() UpperCamelCase__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase__ : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' "Make sure to set `--audio_column_name` to the correct audio column - one of " f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' "Make sure to set `--label_column_name` to the correct text column - one of " f'{", ".join(raw_datasets["train"].column_names )}.' 
) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy UpperCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. UpperCamelCase__ : Any = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) UpperCamelCase__ : Optional[int] = feature_extractor.model_input_names[0] def train_transforms(__lowerCAmelCase ): UpperCamelCase__ : Optional[int] = [] for audio in batch[data_args.audio_column_name]: UpperCamelCase__ : Union[str, Any] = random_subsample( audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Dict = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate ) UpperCamelCase__ : Tuple = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )} UpperCamelCase__ : Union[str, Any] = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(__lowerCAmelCase ): UpperCamelCase__ : Any = [audio["array"] for audio in batch[data_args.audio_column_name]] UpperCamelCase__ : List[str] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate ) UpperCamelCase__ : Dict = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )} UpperCamelCase__ : List[Any] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. UpperCamelCase__ : str = raw_datasets["train"].features[data_args.label_column_name].names UpperCamelCase__ , UpperCamelCase__ : Dict = {}, {} for i, label in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCamelCase__ : int = str(SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Tuple = label # Load the accuracy metric from the datasets package UpperCamelCase__ : List[str] = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(__lowerCAmelCase ): UpperCamelCase__ : Optional[int] = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=eval_pred.label_ids ) UpperCamelCase__ : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE__ ) , labelaid=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase__ : Optional[Any] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: UpperCamelCase__ : Optional[Any] = ( raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ ) if training_args.do_eval: if data_args.max_eval_samples is not None: UpperCamelCase__ : Dict = ( raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ ) # Initialize our trainer UpperCamelCase__ : Tuple = Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: UpperCamelCase__ : List[str] = None if training_args.resume_from_checkpoint is not None: UpperCamelCase__ : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase__ : Optional[int] = last_checkpoint UpperCamelCase__ : Tuple = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase__ : str = trainer.evaluate() trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE__ ) trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE__ ) # Write model card and (optionally) push to hub UpperCamelCase__ : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
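For reference, a small sketch of what the random_subsample helper defined at the top of this script does; the function name and the max_length / sample_rate parameter names are taken from its call site in train_transforms (the def itself is mangled), and the values below are arbitrary:

import numpy as np

wav = np.random.randn(16_000 * 30)                               # 30 s of fake 16 kHz audio
clip = random_subsample(wav, max_length=20.0, sample_rate=16_000)
assert len(clip) == 16_000 * 20                                  # long clips get a random 20 s crop
short = np.random.randn(16_000 * 5)
assert random_subsample(short, max_length=20.0, sample_rate=16_000) is short  # short clips pass through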
189
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied over unchanged. NOTE: the student-side key names below
    # follow DistilBERT's state-dict layout and are reconstructed from memory; the
    # teacher-side keys are the ones visible in the original file.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Copy six selected teacher layers into consecutive student layers.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
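The layer-selection logic above copies teacher layers [0, 2, 4, 7, 9, 11] into six consecutive student slots; a tiny illustration of the resulting index mapping:

teacher_layers = [0, 2, 4, 7, 9, 11]          # the teacher_idx values iterated above
layer_map = dict(enumerate(teacher_layers))   # student layer -> teacher layer
print(layer_map)                              # {0: 0, 1: 2, 2: 4, 3: 7, 4: 9, 5: 11}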
346
0
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore right/diagonal/down sub-squares from every cell."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memoization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Same recursion, memoized in dp_array so each cell is solved once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, down)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only two rows for O(cols) extra space."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, down)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Snapshot the finished row; a plain `next_row = current_row` would alias
        # the two lists and let this row's writes leak into "previous row" reads.
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
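All four variants above should agree; a quick check on a small matrix (function names follow the cleaned-up version of this file):

mat = [
    [1, 1, 0, 1],
    [1, 1, 1, 1],
    [0, 1, 1, 1],
]
for fn in (
    largest_square_area_in_matrix_top_down,
    largest_square_area_in_matrix_top_down_with_memoization,
    largest_square_area_in_matrix_bottom_up,
    largest_square_area_in_matrix_bottom_up_space_optimization,
):
    assert fn(3, 4, mat) == 2  # largest all-ones square is 2x2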
92
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
**_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( 
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
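Outside the test harness, the sampling pattern that full_loop drives looks roughly like this; `model` below is a zero-output stand-in for a real noise-prediction network, so treat the snippet as a sketch of the control flow, not a working pipeline:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

def model(sample, t):
    return torch.zeros_like(sample)  # placeholder noise predictor

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:    # Runge-Kutta warm-up steps
    residual = model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:   # linear multistep phase
    residual = model(sample, t)
    sample = scheduler.step_plms(residual, t, sample).prev_sample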
346
0
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase__ : Tuple = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[Any]=7 , __lowercase : Any=3 , __lowercase : Tuple=18 , __lowercase : str=30 , __lowercase : Optional[Any]=4_00 , __lowercase : List[Any]=None , __lowercase : Tuple=True , __lowercase : Optional[int]=True , __lowercase : str=None , ): """simple docstring""" snake_case_ = size if size is not None else {"height": 20, "width": 20} snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = image_size snake_case_ = min_resolution snake_case_ = max_resolution snake_case_ = size snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = [5_12, 10_24, 20_48, 40_96] snake_case_ = patch_size if patch_size is not None else {"height": 16, "width": 16} def snake_case__ ( self : str ): """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def snake_case__ ( self : List[str] ): """simple docstring""" snake_case_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" snake_case_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class UpperCAmelCase ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = PixaStructImageProcessor if is_vision_available() else None def snake_case__ ( self : int ): """simple docstring""" snake_case_ = PixaStructImageProcessingTester(self ) @property def snake_case__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : Dict ): """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) ) def snake_case__ ( self : int ): """simple docstring""" snake_case_ = self.image_processor_tester.prepare_dummy_image() snake_case_ = self.image_processing_class(**self.image_processor_dict ) snake_case_ = 20_48 snake_case_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def snake_case__ ( self : Optional[Any] ): """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input snake_case_ = ( (self.image_processor_tester.patch_size["height"] * 
self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case_ = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched snake_case_ = image_processor( _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def snake_case__ ( self : Optional[Any] ): """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input snake_case_ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 snake_case_ = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_UpperCAmelCase ): snake_case_ = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches snake_case_ = "Hello" snake_case_ = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched snake_case_ = image_processor( _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def snake_case__ ( self : List[str] ): """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) snake_case_ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case_ = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched snake_case_ = image_processor( _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def snake_case__ ( self : Any ): """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input snake_case_ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * 
self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case_ = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched snake_case_ = image_processor( _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class UpperCAmelCase ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = PixaStructImageProcessor if is_vision_available() else None def snake_case__ ( self : List[str] ): """simple docstring""" snake_case_ = PixaStructImageProcessingTester(self , num_channels=4 ) snake_case_ = 3 @property def snake_case__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : List[str] ): """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) ) def snake_case__ ( self : Tuple ): """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input snake_case_ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input snake_case_ = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched snake_case_ = image_processor( _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
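The expected_hidden_dim used throughout these assertions is just the flattened patch size plus two extra slots (which, per the processor's flattening scheme, carry the patch's row and column indices); for the default tester settings:

patch_size = {"height": 16, "width": 16}  # defaults from Pix2StructImageProcessingTester
num_channels = 3
expected_hidden_dim = (patch_size["height"] * patch_size["width"]) * num_channels + 2
print(expected_hidden_dim)  # 770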
187
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
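A short usage sketch for the config above (via the public transformers namespace); the defaults match the Kinetics-400 checkpoint and any field can be overridden at construction time:

from transformers import VivitConfig

config = VivitConfig(num_frames=16, hidden_dropout_prob=0.1)
print(config.model_type, config.image_size, config.tubelet_size)  # vivit 224 [2, 16, 16]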
346
0
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _a ( _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ , snake_case_ = emb.weight.shape snake_case_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) snake_case_ = emb.weight.data return lin_layer def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple: snake_case_ = {} for old_key in state_dict.keys(): snake_case_ = old_key if "moe_layer.experts." in key: if expert_idx is not None: snake_case_ = key.replace("""moe_layer.experts.0""" , f"""ffn.experts.expert_{expert_idx}""" ) else: snake_case_ = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: snake_case_ = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: snake_case_ = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: snake_case_ = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: snake_case_ = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: snake_case_ = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: snake_case_ = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) snake_case_ = state_dict[old_key] return new_dict def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = WEIGHTS_NAME ) -> Union[str, Any]: snake_case_ = [] snake_case_ = 0 os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) for expert in range(_SCREAMING_SNAKE_CASE ): snake_case_ = switch_checkpoint_path + f"""-rank-{expert}.pt""" if os.path.isfile(_SCREAMING_SNAKE_CASE ): snake_case_ = torch.load(_SCREAMING_SNAKE_CASE )["""model"""] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) snake_case_ = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = os.path.join( _SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_SCREAMING_SNAKE_CASE )[0]].dtype ) # Add the last block snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) ) snake_case_ = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) snake_case_ = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if 
len(_SCREAMING_SNAKE_CASE ) == 1: snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Otherwise, let's build the index snake_case_ = {} for idx, shard in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-{len(_SCREAMING_SNAKE_CASE ):05d}.bin""" ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for key in shard: snake_case_ = shard_file # Add the metadata snake_case_ = {"""total_size""": total_size} snake_case_ = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" , encoding="""utf-8""" ) as f: snake_case_ = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + """\n""" f.write(_SCREAMING_SNAKE_CASE ) return metadata, index if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) __SCREAMING_SNAKE_CASE : Any = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) __SCREAMING_SNAKE_CASE : List[Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
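A toy check of the key-renaming rules implemented above; the name rename_fairseq_keys is taken from the function's call sites (the defs themselves are mangled), and the second positional argument is the expert index:

state_dict = {
    "layers.3.moe_layer.experts.0.fc1.weight": 1,
    "layers.3.moe_layer.gate.wg.weight": 2,
    "layers.5.encoder_attn_layer_norm.bias": 3,
}
print(rename_fairseq_keys(state_dict, 7))
# {'layers.3.ffn.experts.expert_7.fc1.weight': 1,
#  'layers.3.ffn.router.classifier.weight': 2,
#  'layers.5.cross_attention_layer_norm.bias': 3}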
347
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
347
1
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
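# A minimal usage sketch (separate from the tests above) of the HfArgumentParser
# behavior being exercised; the dataclass and CLI values here are hypothetical.
from dataclasses import dataclass

from transformers import HfArgumentParser


@dataclass
class ExampleArguments:
    foo: int = 42
    bar: str = "toto"


parser = HfArgumentParser(ExampleArguments)
(example_args,) = parser.parse_args_into_dataclasses(["--foo", "7", "--bar", "titi"])
print(example_args)  # ExampleArguments(foo=7, bar='titi')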
"""Train a masked language model on TPU."""

import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)

logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
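# Quick illustrative check of count_samples() above (filenames are hypothetical):
# each shard name encodes its record count after the final dash, which is exactly
# what the regex r"-\d+-(\d+)\.tfrecord" extracts.
assert count_samples(["train-0-1000.tfrecord", "train-1-500.tfrecord"]) == 1500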
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
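# Minimal sketch of what the lazy module above enables: `import transformers` stays
# cheap, and the heavy RoBERTa submodules are only loaded on first attribute access
# (assumes transformers and the relevant backend are installed).
from transformers import RobertaConfig  # resolved lazily through _LazyModule

config = RobertaConfig()
print(config.hidden_size)  # 768 for the default config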
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Compute the speed of sound in a fluid from its density (in kg/m^3) and its
    bulk modulus (in Pa): speed = sqrt(bulk_modulus / density).

    >>> speed_of_sound_in_a_fluid(density=1.0, bulk_modulus=4.0)
    2.0
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
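# Worked example for speed_of_sound_in_a_fluid() above, using approximate properties
# of water (bulk modulus ~2.1e9 Pa, density ~998 kg/m^3); the result is close to the
# familiar ~1.5 km/s speed of sound in water.
print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.1e9))  # ~1450.6 m/s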
def find_min(arr: list[int]) -> int:
    """
    Split `arr` into two subsets so that the difference between the subset sums
    is as small as possible, and return that minimum difference.

    For example, [1, 6, 11, 5] sums to 23 and its best split is {1, 5, 6} = 12
    versus {11} = 11:

    >>> find_min([1, 6, 11, 5])
    1
    """
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]

    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0

    for j in range(1, s + 1):
        dp[0][j] = False  # with no elements, no positive sum is reachable

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    diff = 0
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            # j is the largest reachable subset sum not exceeding s / 2
            diff = s - 2 * j
            break

    return diff
def is_palindrome(num: int) -> bool:
    """
    Return True if the integer reads the same forwards and backwards.
    Negative numbers are never palindromes because of the leading sign.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(123)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
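# Minimal sketch (assuming transformers is installed): the config class above can be
# instantiated with its defaults, and any field can be overridden by keyword argument.
from transformers import BeitConfig

config = BeitConfig(image_size=384)
print(config.hidden_size, config.image_size)  # 768 384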
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = SpeechTaTokenizer __lowercase: int = False __lowercase: List[str] = True def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ ) snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) snake_case_ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = """this is a test""" snake_case_ = """this is a test""" return input_text, output_text def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]: """simple docstring""" snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ ) snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = """<pad>""" snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCAmelCase_ ) , 81 ) def lowerCAmelCase ( self : Optional[int] ) ->int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size + 
len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ ) snake_case_ = tokenizer.vocab_size snake_case_ = len(UpperCAmelCase_ ) self.assertNotEqual(UpperCAmelCase_ , 0 ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) ) snake_case_ = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ ) self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->List[str]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) # fmt: off self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off snake_case_ = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
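# Short usage sketch of the tokenizer class under test; loading the checkpoint named
# in the slow test above requires network access, so treat this as illustrative only.
from transformers import SpeechTaTokenizer as SpeechT5Tokenizer  # SpeechT5 tokenizer class

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
print(tokenizer.tokenize("This is a test"))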
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]=7 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Any=400 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : List[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=1 / 255 , UpperCAmelCase_ : Optional[Any]=True , ) ->List[Any]: """simple docstring""" snake_case_ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333} snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = min_resolution snake_case_ = max_resolution snake_case_ = do_resize snake_case_ = size snake_case_ = do_normalize snake_case_ = image_mean snake_case_ = image_std snake_case_ = do_rescale snake_case_ = rescale_factor snake_case_ = do_pad def lowerCAmelCase ( self : str ) ->List[str]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase ( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=False ) ->Tuple: """simple docstring""" if not batched: snake_case_ = image_inputs[0] if isinstance(UpperCAmelCase_ , Image.Image ): snake_case_ , snake_case_ = image.size else: snake_case_ , snake_case_ = image.shape[1], image.shape[2] if w < h: snake_case_ = int(self.size["""shortest_edge"""] * h / w ) snake_case_ = self.size["""shortest_edge"""] elif w > h: snake_case_ = self.size["""shortest_edge"""] snake_case_ = int(self.size["""shortest_edge"""] * w / h ) else: snake_case_ = self.size["""shortest_edge"""] snake_case_ = self.size["""shortest_edge"""] else: snake_case_ = [] for image in image_inputs: snake_case_ , snake_case_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0] snake_case_ = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: List[Any] = YolosImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = YolosImageProcessingTester(self ) @property def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ 
, """do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) snake_case_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Optional[Any] ) ->List[str]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values snake_case_ , snake_case_ = 
self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Optional[int] ) ->Any: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) snake_case_ = self.image_processing_class(do_resize=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_rescale=UpperCAmelCase_ ) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors snake_case_ = image_processing_a.pad(UpperCAmelCase_ , return_tensors="""pt""" ) snake_case_ = image_processing_a(UpperCAmelCase_ , return_tensors="""pt""" ) self.assertTrue( torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1E-4 ) ) @slow def lowerCAmelCase ( self : List[Any] ) ->Optional[int]: """simple docstring""" snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: snake_case_ = json.loads(f.read() ) snake_case_ = {"""image_id""": 39_769, """annotations""": target} # encode them snake_case_ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" ) snake_case_ = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="""pt""" ) # verify pixel values snake_case_ = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ ) snake_case_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area snake_case_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) ) # verify boxes snake_case_ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ ) snake_case_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id snake_case_ = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) ) # verify is_crowd snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) ) # verify class_labels snake_case_ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) ) # verify orig_size snake_case_ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) ) # verify size snake_case_ = 
torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) ) @slow def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]: """simple docstring""" snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: snake_case_ = json.loads(f.read() ) snake_case_ = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target} snake_case_ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them snake_case_ = YolosImageProcessor(format="""coco_panoptic""" ) snake_case_ = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="""pt""" ) # verify pixel values snake_case_ = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ ) snake_case_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area snake_case_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) ) # verify boxes snake_case_ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ ) snake_case_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id snake_case_ = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) ) # verify is_crowd snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) ) # verify class_labels snake_case_ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) ) # verify masks snake_case_ = 822_873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCAmelCase_ ) # verify orig_size snake_case_ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) ) # verify size snake_case_ = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
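# Minimal sketch of the image processor under test (checkpoint name taken from the
# slow tests above; the image path is hypothetical and network access is assumed).
from PIL import Image

from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
inputs = image_processor(images=Image.open("example.jpg"), return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066]) after resizing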
"""simple docstring""" import datasets __SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' __SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' __SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int: """simple docstring""" return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
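# Minimal usage sketch of the metric defined above, mirroring its own docstring
# example (datasets.load_metric is deprecated in newer versions of `datasets`,
# so treat this as illustrative).
import datasets

xnli_metric = datasets.load_metric("xnli")
print(xnli_metric.compute(predictions=[0, 1], references=[0, 1]))  # {'accuracy': 1.0}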
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    """
    >>> new_list = LinkedList()
    >>> new_list.is_empty()
    True
    >>> new_list.insert(10)
    >>> new_list.insert(20)
    >>> str(new_list)
    '10 20'
    >>> 10 in new_list
    True
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
347
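# A self-contained sketch of the pointer rewiring that the `insert_before_node`
# method above performs on a doubly linked list. `_Node` and `insert_before`
# are local to this sketch, not the classes above.
class _Node:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

def insert_before(node, new_node):
    # Splice new_node between node.previous and node, updating all four links.
    new_node.next = node
    new_node.previous = node.previous
    if node.previous is not None:
        node.previous.next = new_node
    node.previous = new_node

a, b = _Node(1), _Node(3)
a.next, b.previous = b, a
insert_before(b, _Node(2))
assert [a.data, a.next.data, a.next.next.data] == [1, 2, 3]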
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
347
1
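# The file above is generated boilerplate: every class is a placeholder that
# fails loudly when the `sentencepiece` backend is not installed. A minimal
# sketch of the same pattern; `somepackage` and `SomeTokenizer` are
# illustrative names, not real transformers identifiers.
class _DummyBackendMeta(type):
    # Intercept class-level attribute access, e.g. `SomeTokenizer.from_pretrained`.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the `somepackage` backend.")

class SomeTokenizer(metaclass=_DummyBackendMeta):
    _backends = ["somepackage"]

    def __init__(self, *args, **kwargs):
        # Instantiation fails the same way, mirroring `requires_backends` above.
        raise ImportError("SomeTokenizer requires the `somepackage` backend.")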
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: # Load checkpoint snake_case_ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) snake_case_ = chkpt["""model"""] # We have the base model one level deeper than the original XLM repository snake_case_ = {} for k, v in state_dict.items(): if "pred_layer" in k: snake_case_ = v else: snake_case_ = v snake_case_ = chkpt["""params"""] snake_case_ = {n: v for n, v in config.items() if not isinstance(_SCREAMING_SNAKE_CASE , (torch.FloatTensor, numpy.ndarray) )} snake_case_ = chkpt["""dico_word2id"""] snake_case_ = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()} # Save pytorch-model snake_case_ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME snake_case_ = pytorch_dump_folder_path + """/""" + CONFIG_NAME snake_case_ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""] print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) + """\n""" ) print(f"""Save vocab file to {pytorch_config_dump_path}""" ) with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) + """\n""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
347
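# A standalone sketch of the vocab rewrite done in the conversion above:
# fairseq-style BPE marks word-internal pieces with a trailing "@@", while the
# converted vocab marks word-final pieces with "</w>"; indices up to 13 are
# special tokens and kept as-is. Function and sample symbols are illustrative.
def convert_bpe_symbol(symbol: str, index: int) -> str:
    if "@@" in symbol:
        return symbol.replace("@@", "")  # word-internal piece: drop the marker
    return symbol + "</w>" if index > 13 else symbol  # word-final piece / special token

assert convert_bpe_symbol("hel@@", 100) == "hel"
assert convert_bpe_symbol("lo", 100) == "lo</w>"
assert convert_bpe_symbol("<unk>", 3) == "<unk>"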
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None: """simple docstring""" warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
347
1
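# A minimal sketch of the deprecation-alias pattern used above: the old class
# name subclasses its replacement and emits a warning at construction time.
# `_NewProcessor` and `_OldFeatureExtractor` are illustrative names only.
import warnings

class _NewProcessor:
    def __init__(self, size=224):
        self.size = size

class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = _OldFeatureExtractor(size=256)  # warns, then behaves like _NewProcessor
assert extractor.size == 256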
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE ) -> bool: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = f"""Input value of [number={number}] must be an integer""" raise TypeError(_SCREAMING_SNAKE_CASE ) if number < 0: return False snake_case_ = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
347
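# A standalone restatement of the check above: n passes when n**2 ends in the
# digits of n (an automorphic number), verified digit by digit from the right.
# The function name is local to this sketch.
def is_automorphic_sketch(n: int) -> bool:
    if n < 0:
        return False
    square = n * n
    while n > 0:
        if n % 10 != square % 10:
            return False
        n //= 10
        square //= 10
    return True

assert is_automorphic_sketch(76)      # 76**2 == 5776, which ends in 76
assert not is_automorphic_sketch(7)   # 7**2 == 49, which does not end in 7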
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , 
outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
347
1
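# A standalone sketch of the q/k/v split performed in `read_in_q_k_v` above:
# timm stores the attention projections as one fused (3 * hidden, hidden)
# matrix, which is sliced into separate query/key/value weights. The size
# below is illustrative, not a real ViT dimension.
import torch

hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)

q_w = qkv_weight[:hidden, :]
k_w = qkv_weight[hidden : 2 * hidden, :]
v_w = qkv_weight[-hidden:, :]
# The three slices partition the fused matrix exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)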
"""simple docstring""" __SCREAMING_SNAKE_CASE : Any = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def _a ( ) -> None: snake_case_ = input("""Enter message: """ ) snake_case_ = input("""Enter key [alphanumeric]: """ ) snake_case_ = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): snake_case_ = """encrypt""" snake_case_ = encrypt_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif mode.lower().startswith("""d""" ): snake_case_ = """decrypt""" snake_case_ = decrypt_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f"""\n{mode.title()}ed message:""" ) print(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: return translate_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """encrypt""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: return translate_message(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decrypt""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = [] snake_case_ = 0 snake_case_ = key.upper() for symbol in message: snake_case_ = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(_SCREAMING_SNAKE_CASE ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(_SCREAMING_SNAKE_CASE ): snake_case_ = 0 else: translated.append(_SCREAMING_SNAKE_CASE ) return "".join(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
347
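# A compact restatement of the Vigenère translation above, checked with an
# encrypt/decrypt round trip. Only A-Z letters consume key characters; other
# symbols pass through unchanged, exactly as in the routine above. Names here
# are local to the sketch.
LETTERS_SKETCH = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def vigenere(message: str, key: str, decrypt: bool = False) -> str:
    out, k = [], 0
    for ch in message:
        idx = LETTERS_SKETCH.find(ch.upper())
        if idx == -1:
            out.append(ch)  # non-letters are copied verbatim, key does not advance
            continue
        shift = LETTERS_SKETCH.find(key[k % len(key)].upper())
        idx = (idx - shift if decrypt else idx + shift) % 26
        out.append(LETTERS_SKETCH[idx] if ch.isupper() else LETTERS_SKETCH[idx].lower())
        k += 1
    return "".join(out)

cipher = vigenere("Attack at dawn", "LEMON")
assert vigenere(cipher, "LEMON", decrypt=True) == "Attack at dawn"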
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : Optional[int] ) ->str: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = True __lowercase: int = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ ) @require_flax class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(UpperCAmelCase_ )[0] snake_case_ = 50_000 snake_case_ = (1, 6, vocab_size) self.assertEqual(output.shape , UpperCAmelCase_ ) snake_case_ = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
347
1
"""simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __SCREAMING_SNAKE_CASE : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __SCREAMING_SNAKE_CASE : Dict = { 0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: assert len(str(_SCREAMING_SNAKE_CASE ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: snake_case_ = year // 100 snake_case_ = (5 * (century % 4) + 2) % 7 snake_case_ = year % 100 snake_case_ = centurian % 12 snake_case_ = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 snake_case_ = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0) else DOOMSDAY_LEAP[month - 1] ) snake_case_ = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
347
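# A worked check of the routine above against a date with a known weekday:
# 2000-01-01 was a Saturday.
#   century anchor: (5 * ((2000 // 100) % 4) + 2) % 7 = 2
#   the year-within-century terms are all 0, so the year's doomsday is 2 (Tuesday)
#   2000 is a leap year (divisible by 400), so January's anchor date is the
#   4th: DOOMSDAY_LEAP[0] = 4
#   week day = (2 + 1 - 4) % 7 = 6 -> 'Saturday'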
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern snake_case_ , snake_case_ = 0, 0 # index into text, pattern while i < len(_SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(_SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case_ = failure[j - 1] continue i += 1 return False def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = [0] snake_case_ = 0 snake_case_ = 1 while j < len(_SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case_ = failure[i - 1] continue j += 1 failure.append(_SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) __SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12' __SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __SCREAMING_SNAKE_CASE : int = 'ABABX' __SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) __SCREAMING_SNAKE_CASE : Any = 'AAAB' __SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) __SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy' __SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) __SCREAMING_SNAKE_CASE : Any = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
347
1
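# A worked trace of `get_failure_array` above on the test pattern
# "aabaabaaa" (failure[i] = length of the longest proper prefix of
# pattern[:i + 1] that is also its suffix):
#   pattern:  a  a  b  a  a  b  a  a  a
#   failure:  0  1  0  1  2  3  4  5  2
# For the final 'a': the running prefix length is 5, but pattern[5] == 'b'
# mismatches, so the length falls back via failure[4] == 2 (pattern[2] == 'b'
# still mismatches) and then failure[1] == 1; pattern[1] == 'a' matches,
# giving failure[8] = 1 + 1 = 2.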
"""simple docstring""" import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ = SwinConfig() snake_case_ = swin_name.split("""_""" ) snake_case_ = name_split[1] snake_case_ = int(name_split[4] ) snake_case_ = int(name_split[3][-1] ) if model_size == "tiny": snake_case_ = 96 snake_case_ = (2, 2, 6, 2) snake_case_ = (3, 6, 12, 24) elif model_size == "small": snake_case_ = 96 snake_case_ = (2, 2, 18, 2) snake_case_ = (3, 6, 12, 24) elif model_size == "base": snake_case_ = 128 snake_case_ = (2, 2, 18, 2) snake_case_ = (4, 8, 16, 32) else: snake_case_ = 192 snake_case_ = (2, 2, 18, 2) snake_case_ = (6, 12, 24, 48) if "in22k" in swin_name: snake_case_ = 21_841 else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = img_size snake_case_ = num_classes snake_case_ = embed_dim snake_case_ = depths snake_case_ = num_heads snake_case_ = window_size return config def _a ( _SCREAMING_SNAKE_CASE ) -> Any: if "patch_embed.proj" in name: snake_case_ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: snake_case_ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: snake_case_ = """encoder.""" + name if "attn.proj" in name: snake_case_ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: snake_case_ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: snake_case_ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: snake_case_ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: snake_case_ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: snake_case_ = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": snake_case_ = """layernorm.weight""" if name == "norm.bias": snake_case_ = """layernorm.bias""" if "head" in name: snake_case_ = name.replace("""head""" , """classifier""" ) else: snake_case_ = """swin.""" + name return name def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: snake_case_ = key.split(""".""" ) snake_case_ = int(key_split[1] ) snake_case_ = int(key_split[3] ) snake_case_ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[ :dim ] snake_case_ = val[ dim : dim * 2 ] snake_case_ = val[ -dim: ] else: snake_case_ = val return orig_state_dict def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() snake_case_ = get_swin_config(_SCREAMING_SNAKE_CASE ) snake_case_ = SwinForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() snake_case_ 
= convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) snake_case_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) snake_case_ = timm_model(inputs["""pixel_values"""] ) snake_case_ = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
347
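# A standalone sketch of the pop-and-reassign move used by `convert_state_dict`
# above when migrating checkpoint entries to their renamed keys. The keys are
# taken from the rename rules above; the values are illustrative placeholders.
def rename_key_sketch(state_dict: dict, old_key: str, new_key: str) -> None:
    # Remove the entry under its old name and re-insert it under the new one.
    state_dict[new_key] = state_dict.pop(old_key)

sd = {"patch_embed.proj.weight": 1, "head.bias": 2}
rename_key_sketch(sd, "patch_embed.proj.weight",
                  "embeddings.patch_embeddings.projection.weight")
assert "patch_embed.proj.weight" not in sd
assert sd["embeddings.patch_embeddings.projection.weight"] == 1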
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class __A (snake_case__): '''simple docstring''' @slow @require_torch def lowerCAmelCase ( self : Union[str, Any] ) ->Dict: """simple docstring""" snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" ) snake_case_ = bertabert.config.encoder.vocab_size snake_case_ = tokenizer.sep_token_id snake_case_ = tokenizer.cls_token_id snake_case_ = 128 snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) snake_case_ = train_dataset.select(range(32 ) ) snake_case_ = val_dataset.select(range(16 ) ) snake_case_ = 4 def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ): # Tokenizer will automatically set [BOS] <text> [EOS] snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 ) snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 ) snake_case_ = inputs.input_ids snake_case_ = inputs.attention_mask snake_case_ = outputs.input_ids snake_case_ = outputs.input_ids.copy() snake_case_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] snake_case_ = outputs.attention_mask assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids ) assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ): snake_case_ = pred.label_ids snake_case_ = pred.predictions # all unnecessary tokens are removed snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ ) return {"accuracy": accuracy} # map train dataset snake_case_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset snake_case_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = SeqaSeqTrainingArguments( output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer snake_case_ = SeqaSeqTrainer( 
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) # start training trainer.train()
347
1
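# A standalone sketch of the label preparation done in
# `_map_to_encoder_decoder_inputs` above: padding positions are replaced by
# -100 so the cross-entropy loss ignores them. `pad_token_id = 0` and the
# label rows are illustrative values.
pad_token_id = 0
labels = [[5, 9, 2, 0, 0], [7, 2, 0, 0, 0]]
masked = [
    [-100 if token == pad_token_id else token for token in row]
    for row in labels
]
assert masked == [[5, 9, 2, -100, -100], [7, 2, -100, -100, -100]]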
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : List[Any] ) ->Tuple: """simple docstring""" snake_case_ = tempfile.mkdtemp() snake_case_ = BlipImageProcessor() snake_case_ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) snake_case_ = BlipProcessor(UpperCAmelCase_ , UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[int] , **UpperCAmelCase_ : str ) ->Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).tokenizer def lowerCAmelCase ( self : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).image_processor def lowerCAmelCase ( self : str ) ->List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : Tuple ) ->Any: """simple docstring""" snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 ) snake_case_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->str: """simple docstring""" snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = BlipProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(UpperCAmelCase_ , return_tensors="""np""" ) snake_case_ = processor(images=UpperCAmelCase_ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]: """simple docstring""" snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = BlipProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) snake_case_ = """lower newer""" snake_case_ = processor(text=UpperCAmelCase_ ) snake_case_ = tokenizer(UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase ( self : Tuple ) ->int: """simple 
docstring""" snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = BlipProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) snake_case_ = """lower newer""" snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = BlipProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(UpperCAmelCase_ ) snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = BlipProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) snake_case_ = """lower newer""" snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
347
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') __SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() __SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:]) __SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""") __SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
347
1
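# Example invocation of the script above, mirroring the comment at its top
# (the directory names are the usual repository layout and may differ):
#   python ./utils/get_modified_files.py utils src tests examples
# This prints the space-separated modified .py files under those
# sub-directories since the fork point with `main`, without a trailing newline.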
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = """pix2struct_text_model""" __lowercase: List[str] = ["""past_key_values"""] __lowercase: Dict = { """hidden_size""": """hidden_size""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : str , UpperCAmelCase_ : Optional[int]=50_244 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Union[str, Any]=64 , UpperCAmelCase_ : Any=2_048 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Optional[int]=128 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Any=1E-6 , UpperCAmelCase_ : int=1.0 , UpperCAmelCase_ : str="gelu_new" , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : int=True , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = d_kv snake_case_ = d_ff snake_case_ = num_layers snake_case_ = num_heads snake_case_ = relative_attention_num_buckets snake_case_ = relative_attention_max_distance snake_case_ = dropout_rate snake_case_ = layer_norm_epsilon snake_case_ = initializer_factor snake_case_ = use_cache snake_case_ = eos_token_id snake_case_ = decoder_start_token_id # for backwards compatibility snake_case_ = dense_act_fn super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , is_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , ) @classmethod def lowerCAmelCase ( cls : Optional[Any] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Union[str, Any] ) ->"PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(UpperCAmelCase_ ) snake_case_ , snake_case_ = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": snake_case_ = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) class __A (snake_case__): '''simple docstring''' __lowercase: Optional[Any] = """pix2struct_vision_model""" def __init__( self : Optional[int] , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : Union[str, Any]=768 , UpperCAmelCase_ : List[Any]=2_048 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : List[str]="gelu_new" , UpperCAmelCase_ : Any=1E-6 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Optional[Any]=1E-10 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : str=4_096 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : List[str]=128 , **UpperCAmelCase_ : List[Any] , ) ->str: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = hidden_size snake_case_ = patch_embed_hidden_size snake_case_ = d_ff snake_case_ = dropout_rate snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = initializer_range snake_case_ = initializer_factor snake_case_ = attention_dropout snake_case_ = layer_norm_eps snake_case_ = dense_act_fn snake_case_ = seq_len snake_case_ = relative_attention_num_buckets snake_case_ = relative_attention_max_distance snake_case_ = d_kv @classmethod def lowerCAmelCase ( cls : str , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[str] ) ->"PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(UpperCAmelCase_ ) snake_case_ , snake_case_ = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": snake_case_ = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ ) class __A (snake_case__): '''simple docstring''' __lowercase: Optional[Any] = """pix2struct""" __lowercase: Union[str, Any] = True def __init__( self : Tuple , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=1.0 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=True , **UpperCAmelCase_ : Optional[Any] , ) ->Optional[int]: """simple docstring""" super().__init__(tie_word_embeddings=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ ) if text_config is None: snake_case_ = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: snake_case_ = {} logger.info("""vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.""" ) snake_case_ = PixaStructTextConfig(**UpperCAmelCase_ ) snake_case_ = PixaStructVisionConfig(**UpperCAmelCase_ ) snake_case_ = self.text_config.decoder_start_token_id snake_case_ = self.text_config.pad_token_id snake_case_ = self.text_config.eos_token_id snake_case_ = initializer_factor snake_case_ = initializer_range snake_case_ = self.initializer_range snake_case_ = self.initializer_range snake_case_ = is_vqa @classmethod def lowerCAmelCase ( cls : Tuple , UpperCAmelCase_ : PixaStructTextConfig , UpperCAmelCase_ : PixaStructVisionConfig , **UpperCAmelCase_ : Tuple ) ->Union[str, Any]: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.text_config.to_dict() snake_case_ = self.vision_config.to_dict() snake_case_ = self.__class__.model_type return output
347
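# A minimal sketch of the composite-config pattern above: a top-level config
# holds two sub-configs, fills in defaults when they are missing, and
# serializes them as nested dicts. All names here are illustrative stand-ins,
# not the real transformers classes.
import copy

class _SubConfig:
    def __init__(self, hidden_size=768, **kwargs):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class _CompositeConfig:
    def __init__(self, text_config=None, vision_config=None):
        # Missing sub-configs fall back to defaults, as in the class above.
        self.text_config = _SubConfig(**(text_config or {}))
        self.vision_config = _SubConfig(**(vision_config or {}))

    def to_dict(self):
        out = copy.deepcopy(self.__dict__)
        out["text_config"] = self.text_config.to_dict()
        out["vision_config"] = self.vision_config.to_dict()
        return out

cfg = _CompositeConfig(text_config={"hidden_size": 512})
assert cfg.to_dict()["text_config"]["hidden_size"] == 512
assert cfg.to_dict()["vision_config"]["hidden_size"] == 768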
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } __SCREAMING_SNAKE_CASE : List[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = {} with open(_SCREAMING_SNAKE_CASE , """r""" ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): snake_case_ = line.strip() if line: snake_case_ = line.split() snake_case_ = line_number snake_case_ = words[0] snake_case_ = value return result def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for attribute in key.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": snake_case_ = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = shape_pointer.shape # let's reduce dimension snake_case_ = value[0] else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = value else: snake_case_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ = """param""" if weight_type is not None and weight_type != "param": snake_case_ = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ = """.""".join([key, hf_param_name] ) else: snake_case_ = key snake_case_ = value if """lm_head""" in full_key else value[0] __SCREAMING_SNAKE_CASE : int = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]: snake_case_ = False for key, mapped_key in MAPPING.items(): snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2] snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: snake_case_ = """weight_g""" elif "weight_v" in name: snake_case_ = """weight_v""" elif "bias" in name: snake_case_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ = """weight""" else: snake_case_ = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ = True else: snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = full_name.split("""conv_layers.""" )[-1] snake_case_ = name.split(""".""" ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int: if config_path is not None: snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: snake_case_ = WavaVecaConfig() if is_seq_class: snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = idalabel snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ = target_dict.pad_index snake_case_ = target_dict.bos_index snake_case_ = target_dict.eos_index snake_case_ = len(target_dict.symbols ) snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) snake_case_ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ = 0 snake_case_ = 1 with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , ) snake_case_ = True if config.feat_extract_norm == """layer""" else False snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: snake_case_ = 
WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) snake_case_ = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) __SCREAMING_SNAKE_CASE : Any = parser.parse_args() __SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
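# A hypothetical invocation of the converter above (the script and folder
# names are placeholders, not from the source); passing --not_finetuned routes
# through the WavaVecaForPreTraining branch:
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned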
"""simple docstring""" import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __A (unittest.TestCase): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[Any]=18 , UpperCAmelCase_ : int=30 , UpperCAmelCase_ : Optional[int]=400 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Dict=[0.5, 0.5, 0.5] , ) ->Tuple: """simple docstring""" snake_case_ = size if size is not None else {"""height""": 18, """width""": 18} snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = image_size snake_case_ = min_resolution snake_case_ = max_resolution snake_case_ = do_resize snake_case_ = size snake_case_ = do_normalize snake_case_ = image_mean snake_case_ = image_std def lowerCAmelCase ( self : str ) ->Tuple: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: str = DPTImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]: """simple docstring""" snake_case_ = DPTImageProcessingTester(self ) @property def lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : str ) ->int: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) ) def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]: """simple docstring""" snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase ( self : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
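# A minimal usage sketch of the processor exercised above (kept as comments;
# the size values mirror the tester's defaults):
#
#   image_processor = DPTImageProcessor(size={"height": 18, "width": 18})
#   pixel_values = image_processor(Image.new("RGB", (30, 30)), return_tensors="pt").pixel_values
#   # pixel_values.shape == torch.Size([1, 3, 18, 18])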
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __A : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = rotary_dim snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = initializer_range snake_case_ = None snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 snake_case_ = vocab_size - 1 def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) 
snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" snake_case_ = 20 snake_case_ = model_class_name(UpperCAmelCase_ ) snake_case_ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ ) snake_case_ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) snake_case_ = model( input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) snake_case_ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = FlaxGPTJModelTester(self ) def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] ) ->Any: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @tooslow def lowerCAmelCase ( self : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ ) snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = False snake_case_ = model.config.eos_token_id snake_case_ = jax.jit(model.generate ) snake_case_ = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences snake_case_ = 
tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) snake_case_ = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @is_pt_flax_cross_test def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ ) snake_case_ = fx_state with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ ) snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = pt_model_class(UpperCAmelCase_ ).eval() snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa ) snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params ) snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCAmelCase_ ): snake_case_ = 0 snake_case_ = 1 snake_case_ = 0 snake_case_ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple() snake_case_ = fx_model(**UpperCAmelCase_ 
).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCAmelCase_ ) snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple() self.assertEqual( len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
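# A condensed sketch of the jitted-generation path the @tooslow test above
# covers (comments only; loading EleutherAI/gpt-j-6B requires the full
# multi-GB checkpoint):
#
#   tokenizer = GPTaTokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
#   model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
#   jit_generate = jax.jit(model.generate)
#   inputs = tokenizer(["Hello"], return_tensors="np", padding=True)
#   sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"],
#                            pad_token_id=tokenizer.pad_token_id).sequences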
"""simple docstring""" from itertools import product def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]: snake_case_ = sides_number snake_case_ = max_face_number * dice_number snake_case_ = [0] * (max_total + 1) snake_case_ = 1 snake_case_ = range(_SCREAMING_SNAKE_CASE , max_face_number + 1 ) for dice_numbers in product(_SCREAMING_SNAKE_CASE , repeat=_SCREAMING_SNAKE_CASE ): snake_case_ = sum(_SCREAMING_SNAKE_CASE ) totals_frequencies[total] += 1 return totals_frequencies def _a ( ) -> float: snake_case_ = total_frequency_distribution( sides_number=4 , dice_number=9 ) snake_case_ = total_frequency_distribution( sides_number=6 , dice_number=6 ) snake_case_ = 0 snake_case_ = 9 snake_case_ = 4 * 9 snake_case_ = 6 for peter_total in range(_SCREAMING_SNAKE_CASE , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) snake_case_ = (4**9) * (6**6) snake_case_ = peter_wins_count / total_games_number snake_case_ = round(_SCREAMING_SNAKE_CASE , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class __A (snake_case__): '''simple docstring''' __lowercase: int = """upernet""" def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = backbone_config.get("""model_type""" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(UpperCAmelCase_ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase ( self : str ) ->Optional[Any]: """simple docstring""" snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
"""simple docstring""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float: if density <= 0: raise ValueError("""Impossible fluid density""" ) if bulk_modulus <= 0: raise ValueError("""Impossible bulk modulus""" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = """ylacombe/bark-small""" snake_case_ = tempfile.mkdtemp() snake_case_ = """en_speaker_1""" snake_case_ = """This is a test string""" snake_case_ = """speaker_embeddings_path.json""" snake_case_ = """speaker_embeddings""" def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase ( self : Dict ) ->int: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case_ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) snake_case_ = 35 snake_case_ = 2 snake_case_ = 8 snake_case_ = { """semantic_prompt""": np.ones(UpperCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file snake_case_ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ ) snake_case_ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ ) snake_case_ = processor(text=self.input_string ) snake_case_ = tokenizer( self.input_string , padding="""max_length""" 
, max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
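# The voice-preset contract the tests above rely on, as a condensed sketch
# (comments only; shapes are the tester's illustrative values): a preset is a
# dict of three numpy arrays, and may equally be given as an .npz path or a
# hub preset name such as "en_speaker_1".
#
#   preset = {
#       "semantic_prompt": np.ones(35),
#       "coarse_prompt": np.ones((2, 35)),
#       "fine_prompt": np.ones((8, 35)),
#   }
#   inputs = processor(text="This is a test string", voice_preset=preset)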
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: __SCREAMING_SNAKE_CASE : Union[str, Any] = None try: import msvcrt except ImportError: __SCREAMING_SNAKE_CASE : List[Any] = None try: import fcntl except ImportError: __SCREAMING_SNAKE_CASE : List[str] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __SCREAMING_SNAKE_CASE : Dict = OSError # Data # ------------------------------------------------ __SCREAMING_SNAKE_CASE : Tuple = [ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] __SCREAMING_SNAKE_CASE : List[Any] = '3.0.12' __SCREAMING_SNAKE_CASE : Any = None def _a ( ) -> List[Any]: global _logger snake_case_ = _logger or logging.getLogger(__name__ ) return _logger class __A (snake_case__): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" snake_case_ = lock_file return None def __str__( self : str ) ->int: """simple docstring""" snake_case_ = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __A : '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" snake_case_ = lock return None def __enter__( self : List[str] ) ->Optional[Any]: """simple docstring""" return self.lock def __exit__( self : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) ->Any: """simple docstring""" self.lock.release() return None class __A : '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str=-1 , UpperCAmelCase_ : List[Any]=None ) ->Tuple: """simple docstring""" snake_case_ = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long snake_case_ = self.hash_filename_if_too_long(UpperCAmelCase_ , UpperCAmelCase_ ) # The path to the lock file. snake_case_ = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. snake_case_ = None # The default timeout value. snake_case_ = timeout # We use this lock primarily for the lock counter. snake_case_ = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. snake_case_ = 0 return None @property def lowerCAmelCase ( self : Tuple ) ->Optional[int]: """simple docstring""" return self._lock_file @property def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]: """simple docstring""" return self._timeout @timeout.setter def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Any: """simple docstring""" snake_case_ = float(UpperCAmelCase_ ) return None def lowerCAmelCase ( self : List[Any] ) ->str: """simple docstring""" raise NotImplementedError() def lowerCAmelCase ( self : Dict ) ->Optional[Any]: """simple docstring""" raise NotImplementedError() @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]: """simple docstring""" return self._lock_file_fd is not None def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=0.05 ) ->Optional[Any]: """simple docstring""" if timeout is None: snake_case_ = self.timeout # Increment the number right at the beginning. 
# We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 snake_case_ = id(self ) snake_case_ = self._lock_file snake_case_ = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(UpperCAmelCase_ ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: snake_case_ = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : str=False ) ->int: """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: snake_case_ = id(self ) snake_case_ = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() snake_case_ = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self : Union[str, Any] ) ->Any: """simple docstring""" self.acquire() return self def __exit__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" self.release() return None def __del__( self : Any ) ->Optional[Any]: """simple docstring""" self.release(force=UpperCAmelCase_ ) return None def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int ) ->str: """simple docstring""" snake_case_ = os.path.basename(UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > max_length and max_length > 0: snake_case_ = os.path.dirname(UpperCAmelCase_ ) snake_case_ = str(hash(UpperCAmelCase_ ) ) snake_case_ = filename[: max_length - len(UpperCAmelCase_ ) - 8] + """...""" + hashed_filename + """.lock""" return os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) else: return path class __A (snake_case__): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=-1 , UpperCAmelCase_ : Tuple=None ) ->Tuple: """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(UpperCAmelCase_ , timeout=UpperCAmelCase_ , max_filename_length=UpperCAmelCase_ ) snake_case_ = """\\\\?\\""" + relative_to_absolute_path(self.lock_file ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" snake_case_ = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: snake_case_ = os.open(self._lock_file , UpperCAmelCase_ ) except OSError: pass else: try: msvcrt.locking(UpperCAmelCase_ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(UpperCAmelCase_ ) else: snake_case_ = fd return None def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = self._lock_file_fd snake_case_ = None msvcrt.locking(UpperCAmelCase_ , msvcrt.LK_UNLCK , 1 ) os.close(UpperCAmelCase_ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __A (snake_case__): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=-1 , UpperCAmelCase_ : Optional[Any]=None ) ->int: """simple docstring""" snake_case_ = os.statvfs(os.path.dirname(UpperCAmelCase_ ) ).f_namemax super().__init__(UpperCAmelCase_ , timeout=UpperCAmelCase_ , max_filename_length=UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Optional[Any]: """simple docstring""" snake_case_ = os.O_RDWR | os.O_CREAT | os.O_TRUNC snake_case_ = os.open(self._lock_file , UpperCAmelCase_ ) try: fcntl.flock(UpperCAmelCase_ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(UpperCAmelCase_ ) else: snake_case_ = fd return None def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]: """simple docstring""" snake_case_ = self._lock_file_fd snake_case_ = None fcntl.flock(UpperCAmelCase_ , fcntl.LOCK_UN ) os.close(UpperCAmelCase_ ) return None class __A (snake_case__): '''simple docstring''' def lowerCAmelCase ( self : List[str] ) ->int: """simple docstring""" snake_case_ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: snake_case_ = os.open(self._lock_file , UpperCAmelCase_ ) except OSError: pass else: snake_case_ = fd return None def lowerCAmelCase ( self : Any ) ->List[Any]: """simple docstring""" os.close(self._lock_file_fd ) snake_case_ = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __SCREAMING_SNAKE_CASE : Optional[int] = None if msvcrt: __SCREAMING_SNAKE_CASE : Tuple = WindowsFileLock elif fcntl: __SCREAMING_SNAKE_CASE : Union[str, Any] = UnixFileLock else: __SCREAMING_SNAKE_CASE : str = SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
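# A minimal usage sketch of the platform-selected lock exported above (the
# lock-file path is a placeholder):
#
#   lock = FileLock("/tmp/app.lock", timeout=5)
#   with lock:
#       pass  # critical section; released on __exit__, Timeout raised after 5s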
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10) def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: float __lowercase: str __lowercase: bool @dataclass class __A : '''simple docstring''' __lowercase: int = 42 __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: Optional[bool] = None class __A (snake_case__): '''simple docstring''' __lowercase: str = """titi""" __lowercase: Any = """toto""" class __A (snake_case__): '''simple docstring''' __lowercase: int = """titi""" __lowercase: Optional[Any] = """toto""" __lowercase: List[Any] = 42 @dataclass class __A : '''simple docstring''' __lowercase: BasicEnum = "toto" def lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" snake_case_ = BasicEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: MixedTypeEnum = "toto" def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]: """simple docstring""" snake_case_ = MixedTypeEnum(self.foo ) @dataclass class __A : '''simple docstring''' __lowercase: Optional[int] = None __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: Optional[str] = None __lowercase: Optional[List[str]] = list_field(default=[]) __lowercase: Optional[List[int]] = list_field(default=[]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = list_field(default=[]) __lowercase: List[int] = list_field(default=[1, 2, 3]) __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3]) @dataclass class __A : '''simple docstring''' __lowercase: List[int] = field() __lowercase: str = field() __lowercase: BasicEnum = field() def lowerCAmelCase ( self : Any ) ->str: """simple docstring""" snake_case_ = BasicEnum(self.required_enum ) @dataclass class __A : '''simple docstring''' __lowercase: int __lowercase: "BasicEnum" = field() __lowercase: "Optional[bool]" = None __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""}) __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class __A : '''simple docstring''' __lowercase: bool = False __lowercase: bool = True __lowercase: bool | None = None @dataclass class __A : '''simple docstring''' __lowercase: int | None = None __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""}) __lowercase: str | None = None __lowercase: list[str] | None = list_field(default=[]) __lowercase: list[int] | None = list_field(default=[]) class __A (unittest.TestCase): '''simple docstring''' def lowerCAmelCase ( 
self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ ) self.assertFalse(example.flag ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = 
parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : int ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" @dataclass class __A : '''simple docstring''' __lowercase: Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) snake_case_ = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) snake_case_ = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase ( self : Optional[int] ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual( UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--bar""" , 
default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase_ ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_args([] ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) ) snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , ) expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ ) self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Tuple: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) def lowerCAmelCase ( self : Any ) ->Dict: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) 
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->List[str]: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) snake_case_ = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" ) os.mkdir(UpperCAmelCase_ ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] snake_case_ = BasicExample(**UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict ) ->Any: """simple docstring""" snake_case_ = HfArgumentParser(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ )
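# A condensed sketch of the parsing pattern these tests exercise (the
# dataclass and CLI values are illustrative):
#
#   @dataclass
#   class Args:
#       foo: int
#       bar: float = 3.14
#
#   parser = HfArgumentParser(Args)
#   (args,) = parser.parse_args_into_dataclasses(["--foo", "12"])
#   # args == Args(foo=12, bar=3.14)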
"""simple docstring""" import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __A (unittest.TestCase): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Union[str, Any]=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : List[Any]=4 , ) ->Union[str, Any]: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def lowerCAmelCase ( self : int ) ->int: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : int ) ->Optional[Any]: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = True 
snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Tuple = True __lowercase: List[str] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : int ) ->Tuple: """simple docstring""" snake_case_ = FlaxRobertaModelTester(self ) @slow def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("""roberta-base""" , from_pt=UpperCAmelCase_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase_ )
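# Illustrative sketch (not part of the original test file): drive the tester by
# hand to inspect the inputs it generates. Assumes the transformers test
# utilities (and JAX) are importable; `parent` is only stored, never used by
# these helpers, so passing None is safe here.
tester = FlaxRobertaModelTester(parent=None)
config, input_ids, token_type_ids, attention_mask = tester.prepare_config_and_inputs()
print(input_ids.shape)  # (13, 7), i.e. (batch_size, seq_length)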
347
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) snake_case_ = [] # custom device map if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ): param.to(_SCREAMING_SNAKE_CASE ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE ) snake_case_ = get_quantized_model_device_map( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if device_map is None: if torch.cuda.is_available(): snake_case_ = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( _SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple: if modules_to_not_convert is None: snake_case_ = [] snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_ , snake_case_ = _replace_with_bnb_layers( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _SCREAMING_SNAKE_CASE ) -> Any: # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] ) snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model snake_case_ = False if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ): snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix 
) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys snake_case_ = [""".weight""", """.bias"""] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" ) filtered_module_names.append(_SCREAMING_SNAKE_CASE ) return filtered_module_names def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for m in model.modules(): if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ): return True return False def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return next(parameter.parameters() ).device def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split(""".""" ) for split in splits[:-1]: snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , ) else: offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
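# Illustrative usage sketch for `load_and_quantize_model` (not part of the file
# above). The tiny `nn.Sequential` is a toy stand-in for the architecture that
# matches the checkpoint; `weights_location` is a placeholder path. Assumes a
# CUDA-enabled `bitsandbytes` install.
import torch.nn as nn

from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

with init_empty_weights():
    empty_model = nn.Sequential(nn.Linear(1024, 4096), nn.GELU(), nn.Linear(4096, 1024))

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint_folder",  # placeholder: folder holding the weights
    device_map="auto",
)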
347
1
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE ) -> int: # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
347
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = """beit""" def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = use_mask_token snake_case_ = use_absolute_position_embeddings snake_case_ = use_relative_position_bias snake_case_ = use_shared_relative_position_bias snake_case_ = layer_scale_init_value snake_case_ = drop_path_rate snake_case_ = use_mean_pooling # decode head attributes (semantic segmentation) snake_case_ = out_indices snake_case_ = pool_scales # auxiliary head attributes (semantic segmentation) snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = semantic_loss_ignore_index class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = version.parse("""1.11""") @property def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1E-4
347
1
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __SCREAMING_SNAKE_CASE : str = get_logger(__name__) __SCREAMING_SNAKE_CASE : Dict = Path(__file__).parent / 'model_card_template.md' __SCREAMING_SNAKE_CASE : List[str] = uuida().hex __SCREAMING_SNAKE_CASE : int = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __SCREAMING_SNAKE_CASE : Tuple = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __SCREAMING_SNAKE_CASE : List[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def _a ( _SCREAMING_SNAKE_CASE = None ) -> str: snake_case_ = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}""" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"""; torch/{_torch_version}""" if is_flax_available(): ua += f"""; jax/{_jax_version}""" ua += f"""; flax/{_flax_version}""" if is_onnx_available(): ua += f"""; onnxruntime/{_onnxruntime_version}""" # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): ua += "; " + user_agent return ua def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None ) -> List[Any]: if token is None: snake_case_ = HfFolder.get_token() if organization is None: snake_case_ = whoami(_SCREAMING_SNAKE_CASE )["""name"""] return f"""{username}/{model_id}""" else: return f"""{organization}/{model_id}""" def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""" ) if hasattr(_SCREAMING_SNAKE_CASE , """local_rank""" ) and args.local_rank not in [-1, 0]: return snake_case_ = args.hub_token if hasattr(_SCREAMING_SNAKE_CASE , """hub_token""" ) else None snake_case_ = get_full_repo_name(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) snake_case_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_SCREAMING_SNAKE_CASE , model_name=_SCREAMING_SNAKE_CASE , repo_name=_SCREAMING_SNAKE_CASE , dataset_name=args.dataset_name if hasattr(_SCREAMING_SNAKE_CASE , """dataset_name""" ) else 
None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(_SCREAMING_SNAKE_CASE , """gradient_accumulation_steps""" ) else None ) , adam_betaa=args.adam_betaa if hasattr(_SCREAMING_SNAKE_CASE , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(_SCREAMING_SNAKE_CASE , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_SCREAMING_SNAKE_CASE , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(_SCREAMING_SNAKE_CASE , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(_SCREAMING_SNAKE_CASE , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_SCREAMING_SNAKE_CASE , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_SCREAMING_SNAKE_CASE , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(_SCREAMING_SNAKE_CASE , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(_SCREAMING_SNAKE_CASE , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , ) snake_case_ = os.path.join(args.output_dir , """README.md""" ) model_card.save(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Optional[int]: if resolved_file is None or commit_hash is not None: return commit_hash snake_case_ = str(Path(_SCREAMING_SNAKE_CASE ).as_posix() ) snake_case_ = re.search(r"""snapshots/([^/]+)/""" , _SCREAMING_SNAKE_CASE ) if search is None: return None snake_case_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(_SCREAMING_SNAKE_CASE ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __SCREAMING_SNAKE_CASE : List[Any] = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __SCREAMING_SNAKE_CASE : str = os.path.join(hf_cache_home, 'diffusers') def _a ( _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None ) -> None: if new_cache_dir is None: snake_case_ = DIFFUSERS_CACHE if old_cache_dir is None: snake_case_ = old_diffusers_cache snake_case_ = Path(_SCREAMING_SNAKE_CASE ).expanduser() snake_case_ = Path(_SCREAMING_SNAKE_CASE ).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): snake_case_ = new_cache_dir / old_blob_path.relative_to(_SCREAMING_SNAKE_CASE ) new_blob_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) os.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) try: os.symlink(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) except OSError: logger.warning( """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). 
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __SCREAMING_SNAKE_CASE : List[str] = 0 else: with open(cache_version_file) as f: try: __SCREAMING_SNAKE_CASE : Union[str, Any] = int(f.read()) except ValueError: __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 if cache_version < 1: __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __SCREAMING_SNAKE_CASE : Tuple = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ 'the directory exists and can be written to.' ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> str: if variant is not None: snake_case_ = weights_name.split(""".""" ) snake_case_ = splits[:-1] + [variant] + splits[-1:] snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE ) return weights_name def _a ( _SCREAMING_SNAKE_CASE , *, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]: snake_case_ = str(_SCREAMING_SNAKE_CASE ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): return pretrained_model_name_or_path elif os.path.isdir(_SCREAMING_SNAKE_CASE ): if os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ): # Load from a PyTorch checkpoint snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ): snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return model_file else: raise EnvironmentError( f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(_SCREAMING_SNAKE_CASE ).base_version ) >= version.parse("""0.20.0""" ) ): try: snake_case_ = hf_hub_download( _SCREAMING_SNAKE_CASE , filename=_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , ) warnings.warn( f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , _SCREAMING_SNAKE_CASE , ) return model_file except: # noqa: E722 warnings.warn( f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}' so that the correct variant file can be added.""" , _SCREAMING_SNAKE_CASE , ) try: # 2. Load model file as usual snake_case_ = hf_hub_download( _SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """ """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""" ) except RevisionNotFoundError: raise EnvironmentError( f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """ """this model name. 
Check the model page at """ f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" ) except EntryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" ) except HTTPError as err: raise EnvironmentError( f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" ) except ValueError: raise EnvironmentError( f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it""" f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a""" f""" directory containing a file named {weights_name} or""" """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" ) except EnvironmentError: raise EnvironmentError( f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """ """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """ f"""containing a file named {weights_name}""" )
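# Quick illustration of the `_add_variant` helper above (not part of the
# original file): the variant tag is spliced in just before the file extension.
print(_add_variant("diffusion_pytorch_model.bin", "fp16"))  # diffusion_pytorch_model.fp16.bin
print(_add_variant("diffusion_pytorch_model.bin"))          # unchanged when variant is None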
347
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'} __SCREAMING_SNAKE_CASE : int = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 __SCREAMING_SNAKE_CASE : Dict = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } __SCREAMING_SNAKE_CASE : Optional[int] = '▁' class __A (snake_case__): '''simple docstring''' __lowercase: Optional[int] = VOCAB_FILES_NAMES __lowercase: Any = PRETRAINED_VOCAB_FILES_MAP __lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) snake_case_ = legacy snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , ) snake_case_ = vocab_file snake_case_ = extra_ids snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase_ ) @staticmethod def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , ) return max_model_length @property def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase ( self : Any ) ->Optional[int]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Any ) ->List[str]: """simple docstring""" return list( set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()] def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]: """simple docstring""" if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]: """simple docstring""" snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) if token_ids_a is None: return token_ids_a else: snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ ) return token_ids_a + token_ids_a def __getstate__( self : Optional[Any] ) ->Tuple: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]: """simple docstring""" if not self.legacy: snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ ) return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple: """simple docstring""" if not self.legacy: snake_case_ = text.startswith(UpperCAmelCase_ ) if is_first: snake_case_ = text[1:] snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ): snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" if token.startswith("""<extra_id_""" ): snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ ) snake_case_ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ ) else: snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = 
os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
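# Illustrative usage (not part of the original file): assumes transformers +
# sentencepiece are installed and the t5-small vocabulary can be fetched.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
ids = tokenizer("Studies have shown that owning a dog is good for you").input_ids
print(tokenizer.convert_ids_to_tokens(ids)[-1])  # '</s>', appended by _add_eos_if_not_present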
347
1