Dataset schema:

| column                  | dtype  | values in this split      |
|-------------------------|--------|---------------------------|
| code                    | string | lengths 86 to 54.5k chars |
| code_codestyle          | int64  | 0 to 371                  |
| style_context           | string | lengths 87 to 49.2k chars |
| style_context_codestyle | int64  | 0 to 349                  |
| label                   | int64  | 0 to 1                    |

Preview rows follow; each row lists code, code_codestyle, style_context, style_context_codestyle, and label in that order.
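A minimal sketch of how a split with this schema could be loaded and inspected with the `datasets` library. The repo id `user/code-style-pairs` is a placeholder, since the preview does not name the dataset, and reading `label` as "1 when the two codestyle ids match" is only an observation from the preview rows below, not documented semantics.

```python
# Sketch only: "user/code-style-pairs" is a placeholder repo id, not the
# real dataset name, which this preview does not give.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

# Columns per the schema above: code (string), code_codestyle (int64),
# style_context (string), style_context_codestyle (int64), label (int64).
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])

# In the preview rows, label is 1 exactly when code_codestyle equals
# style_context_codestyle; this checks that reading over the whole split.
consistent = sum(
    (r["code_codestyle"] == r["style_context_codestyle"]) == bool(r["label"])
    for r in ds
)
print(f"{consistent}/{len(ds)} rows match the matching-style reading")
```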
Row 1
code:
```python
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


a_ = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
    def __init__( self : Union[str, Any] , *__lowercase : str , **__lowercase : Tuple ) -> None:
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' ,
            __lowercase ,
        )
        super().__init__(*__lowercase , **__lowercase )
```
code_codestyle: 152
style_context:
```python
'''simple docstring'''
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def _a( UpperCamelCase__ : str, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Tuple ):
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ : List[str] = AlbertConfig.from_json_file(UpperCamelCase__ )
    print(f"Building PyTorch model from configuration: {config}" )
    SCREAMING_SNAKE_CASE__ : Any = AlbertForPreTraining(UpperCamelCase__ )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict(), UpperCamelCase__ )


if __name__ == "__main__":
    a_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--albert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained ALBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    a_ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
```
style_context_codestyle: 152
label: 1

Row 2
code:
```python
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
```
code_codestyle: 279
style_context:
```python
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    '''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class __lowerCAmelCase ( _a ):
    lowerCamelCase_ : Any = '''biogpt'''

    def __init__(self , __magic_name__=4_2384 , __magic_name__=1024 , __magic_name__=24 , __magic_name__=16 , __magic_name__=4096 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1024 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=True , __magic_name__=True , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , **__magic_name__ , ) -> List[str]:
        '''simple docstring'''
        snake_case_ : List[str] = vocab_size
        snake_case_ : Dict = max_position_embeddings
        snake_case_ : Optional[int] = hidden_size
        snake_case_ : List[Any] = num_hidden_layers
        snake_case_ : List[str] = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : List[Any] = hidden_act
        snake_case_ : List[Any] = hidden_dropout_prob
        snake_case_ : Optional[int] = attention_probs_dropout_prob
        snake_case_ : Optional[int] = initializer_range
        snake_case_ : Optional[int] = layer_norm_eps
        snake_case_ : str = scale_embedding
        snake_case_ : Optional[Any] = use_cache
        snake_case_ : Optional[Any] = layerdrop
        snake_case_ : Optional[Any] = activation_dropout
        super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
```
style_context_codestyle: 279
label: 1

Row 3
code:
```python
'''simple docstring'''
from __future__ import annotations


lowerCamelCase__ = '#'


class lowerCAmelCase__ :
    def __init__( self : Union[str, Any] ) ->None:
        '''simple docstring'''
        _UpperCAmelCase : dict = {}

    def lowerCAmelCase__ ( self : int , lowerCamelCase__ : str ) ->None:
        '''simple docstring'''
        _UpperCAmelCase : Optional[int] = self._trie
        for char in text:
            if char not in trie:
                _UpperCAmelCase : Any = {}
            _UpperCAmelCase : Optional[int] = trie[char]
        _UpperCAmelCase : Dict = True

    def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : str ) ->tuple | list:
        '''simple docstring'''
        _UpperCAmelCase : Union[str, Any] = self._trie
        for char in prefix:
            if char in trie:
                _UpperCAmelCase : Any = trie[char]
            else:
                return []
        return self._elements(lowerCamelCase__ )

    def lowerCAmelCase__ ( self : str , lowerCamelCase__ : dict ) ->tuple:
        '''simple docstring'''
        _UpperCAmelCase : Any = []
        for c, v in d.items():
            _UpperCAmelCase : Optional[int] = [''' '''] if c == END else [(c + s) for s in self._elements(lowerCamelCase__ )]
            result.extend(lowerCamelCase__ )
        return tuple(lowerCamelCase__ )


lowerCamelCase__ = Trie()
lowerCamelCase__ = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def __lowerCAmelCase (__lowerCAmelCase ):
    _UpperCAmelCase : str = trie.find_word(__lowerCAmelCase )
    return tuple(string + word for word in suffixes )


def __lowerCAmelCase ():
    print(autocomplete_using_trie("de" ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
```
code_codestyle: 234
style_context:
```python
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_A : Dict = logging.get_logger(__name__)

_A : Union[str, Any] = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    _UpperCAmelCase : Any = "vit_msn"

    def __init__( self : Optional[Any] , A : Dict=7_6_8 , A : Union[str, Any]=1_2 , A : Optional[Any]=1_2 , A : List[Any]=3_0_7_2 , A : List[str]="gelu" , A : Optional[int]=0.0 , A : int=0.0 , A : int=0.02 , A : Tuple=1e-06 , A : int=2_2_4 , A : Union[str, Any]=1_6 , A : Dict=3 , A : Optional[Any]=True , **A : Optional[Any] , ) ->Dict:
        super().__init__(**A )
        lowerCamelCase__ : int = hidden_size
        lowerCamelCase__ : Dict = num_hidden_layers
        lowerCamelCase__ : str = num_attention_heads
        lowerCamelCase__ : Tuple = intermediate_size
        lowerCamelCase__ : str = hidden_act
        lowerCamelCase__ : Optional[int] = hidden_dropout_prob
        lowerCamelCase__ : Any = attention_probs_dropout_prob
        lowerCamelCase__ : List[str] = initializer_range
        lowerCamelCase__ : Optional[int] = layer_norm_eps
        lowerCamelCase__ : Any = image_size
        lowerCamelCase__ : Any = patch_size
        lowerCamelCase__ : Union[str, Any] = num_channels
        lowerCamelCase__ : Tuple = qkv_bias
```
style_context_codestyle: 142
label: 0

Row 4
code:
```python
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE : List[Any] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class _snake_case ( lowercase_ ):
    lowerCAmelCase_ : str = "openai-gpt"
    lowerCAmelCase_ : Optional[int] = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , a__=40_478 , a__=512 , a__=768 , a__=12 , a__=12 , a__="gelu" , a__=0.1 , a__=0.1 , a__=0.1 , a__=1e-5 , a__=0.0_2 , a__="cls_index" , a__=True , a__=None , a__=True , a__=0.1 , **a__ , ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ = vocab_size
        snake_case_ = n_positions
        snake_case_ = n_embd
        snake_case_ = n_layer
        snake_case_ = n_head
        snake_case_ = afn
        snake_case_ = resid_pdrop
        snake_case_ = embd_pdrop
        snake_case_ = attn_pdrop
        snake_case_ = layer_norm_epsilon
        snake_case_ = initializer_range
        snake_case_ = summary_type
        snake_case_ = summary_use_proj
        snake_case_ = summary_activation
        snake_case_ = summary_first_dropout
        snake_case_ = summary_proj_to_labels
        super().__init__(**a__ )
```
code_codestyle: 92
style_context:
```python
'''simple docstring'''
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


_SCREAMING_SNAKE_CASE : Any = False


class _snake_case ( unittest.TestCase ):
    pass


@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case_ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        snake_case_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        snake_case_ = torch.manual_seed(0 )
        snake_case_ = pipe(
            image=a__ ,
            generator=a__ ,
            guidance_scale=7.5 ,
            num_inference_steps=50 ,
            output_type="numpy" ,
        ).images
        snake_case_ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        snake_case_ = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
```
style_context_codestyle: 92
label: 1

Row 5
code:
```python
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs


A__ : Tuple = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
A__ : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)


def UpperCamelCase__ ( ):
    """simple docstring"""
    _lowerCAmelCase = cn.convert_to_negative(lowerCAmelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()


def UpperCamelCase__ ( ):
    """simple docstring"""
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(lowerCAmelCase , 1_10 ) ).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at"""
        )


def UpperCamelCase__ ( ):
    """simple docstring"""
    _lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def UpperCamelCase__ ( ):
    """simple docstring"""
    _lowerCAmelCase = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    _lowerCAmelCase = canny.canny(lowerCAmelCase )
    # assert canny array for at least one True
    assert canny_array.any()


def UpperCamelCase__ ( ):
    """simple docstring"""
    assert gg.gaussian_filter(lowerCAmelCase , 5 , sigma=0.9 ).all()


def UpperCamelCase__ ( ):
    """simple docstring"""
    _lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    _lowerCAmelCase = conv.img_convolve(lowerCAmelCase , lowerCAmelCase ).astype(lowerCAmelCase )
    assert res.any()


def UpperCamelCase__ ( ):
    """simple docstring"""
    assert med.median_filter(lowerCAmelCase , 3 ).any()


def UpperCamelCase__ ( ):
    """simple docstring"""
    _lowerCAmelCase , _lowerCAmelCase = sob.sobel_filter(lowerCAmelCase )
    assert grad.any() and theta.any()


def UpperCamelCase__ ( ):
    """simple docstring"""
    _lowerCAmelCase = sp.make_sepia(lowerCAmelCase , 20 )
    assert sepia.all()


def UpperCamelCase__ ( lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    """simple docstring"""
    _lowerCAmelCase = bs.Burkes(imread(lowerCAmelCase , 1 ) , 1_20 )
    burkes.process()
    assert burkes.output_img.any()


def UpperCamelCase__ ( lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    """simple docstring"""
    _lowerCAmelCase = rs.NearestNeighbour(imread(lowerCAmelCase , 1 ) , 4_00 , 2_00 )
    nn.process()
    assert nn.output.any()


def UpperCamelCase__ ( ):
    """simple docstring"""
    _lowerCAmelCase = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    _lowerCAmelCase = imread(lowerCAmelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    _lowerCAmelCase = 0
    _lowerCAmelCase = 0
    _lowerCAmelCase = image[x_coordinate][y_coordinate]
    _lowerCAmelCase = lbp.get_neighbors_pixel(
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    _lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            _lowerCAmelCase = lbp.local_binary_value(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
    assert lbp_image.any()
```
code_codestyle: 70
"""simple docstring""" def __lowerCAmelCase ( lowercase : int ) -> bool: """simple docstring""" if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True snake_case : Dict = 4 snake_case : str = (1 << p) - 1 for _ in range(p - 2 ): snake_case : Any = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
style_context_codestyle: 203
label: 0

Row 6
code:
```python
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCamelCase( unittest.TestCase ): def __init__( self, lowerCamelCase, lowerCamelCase=7, lowerCamelCase=3, lowerCamelCase=18, lowerCamelCase=30, lowerCamelCase=4_00, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=True, ) -> str: """simple docstring""" _lowercase : Tuple = size if size is not None else {'height': 18, 'width': 18} _lowercase : Tuple = parent _lowercase : Dict = batch_size _lowercase : Any = num_channels _lowercase : int = image_size _lowercase : List[str] = min_resolution _lowercase : str = max_resolution _lowercase : str = do_resize _lowercase : Optional[int] = size _lowercase : Any = apply_ocr def UpperCamelCase ( self) -> Any: """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCamelCase( _a, unittest.TestCase ): lowercase_ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : Any = LayoutLMvaImageProcessingTester(self) @property def UpperCamelCase ( self) -> Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : List[str] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowerCamelCase, 'do_resize')) self.assertTrue(hasattr(lowerCamelCase, 'size')) self.assertTrue(hasattr(lowerCamelCase, 'apply_ocr')) def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {'height': 18, 'width': 18}) _lowercase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {'height': 42, 'width': 42}) def UpperCamelCase ( self) -> Dict: """simple docstring""" pass def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images _lowercase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase) for image in image_inputs: self.assertIsInstance(lowerCamelCase, Image.Image) # Test not batched input _lowercase : Dict = image_processing(image_inputs[0], return_tensors='pt') self.assertEqual( encoding.pixel_values.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ), ) self.assertIsInstance(encoding.words, lowerCamelCase) self.assertIsInstance(encoding.boxes, lowerCamelCase) # Test batched _lowercase : str = image_processing(lowerCamelCase, return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ), ) def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : Optional[int] = 
self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _lowercase : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase) for image in image_inputs: self.assertIsInstance(lowerCamelCase, np.ndarray) # Test not batched input _lowercase : List[Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ), ) # Test batched _lowercase : Union[str, Any] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ), ) def UpperCamelCase ( self) -> Dict: """simple docstring""" _lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase) for image in image_inputs: self.assertIsInstance(lowerCamelCase, torch.Tensor) # Test not batched input _lowercase : Tuple = image_processing(image_inputs[0], return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ), ) # Test batched _lowercase : List[Any] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ), ) def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : Optional[Any] = LayoutLMvaImageProcessor() from datasets import load_dataset _lowercase : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa', split='test') _lowercase : Optional[Any] = Image.open(ds[0]['file']).convert('RGB') _lowercase : Optional[Any] = image_processing(lowerCamelCase, return_tensors='pt') self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24)) self.assertEqual(len(encoding.words), len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 _lowercase : List[str] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 
'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 _lowercase : Any = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], 
[3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words, lowerCamelCase) self.assertListEqual(encoding.boxes, lowerCamelCase) # with apply_OCR = False _lowercase : List[str] = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase) _lowercase : Union[str, Any] = image_processing(lowerCamelCase, return_tensors='pt') self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
```
code_codestyle: 84
style_context:
```python
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _lowerCamelCase: def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0) _lowercase : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) _lowercase : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) _lowercase : Optional[int] = UNetaDConditionModel( sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) _lowercase : Dict = DDPMScheduler( num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=lowerCamelCase, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', ) torch.manual_seed(0) _lowercase : List[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCamelCase ( self) -> List[str]: """simple docstring""" torch.manual_seed(0) _lowercase : List[str] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) _lowercase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) _lowercase : List[str] = UNetaDConditionModel( sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', class_embed_type='timestep', mid_block_scale_factor=1.4_1_4, time_embedding_act_fn='gelu', time_embedding_dim=32, ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) _lowercase : Optional[int] = DDPMScheduler( num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=lowerCamelCase, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', ) torch.manual_seed(0) _lowercase : str = DDPMScheduler( num_train_timesteps=10_00, beta_schedule='squaredcos_cap_v2', beta_start=0.0_0_0_1, beta_end=0.0_2, ) torch.manual_seed(0) _lowercase : Union[str, Any] = IFWatermarker() return { "text_encoder": text_encoder, 
"tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCamelCase ( self) -> List[Any]: """simple docstring""" _lowercase : List[Any] = self.get_dummy_components() _lowercase : List[str] = self.pipeline_class(**lowerCamelCase) pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : int = self.get_dummy_inputs(lowerCamelCase) _lowercase : int = inputs['prompt'] _lowercase : Dict = inputs['generator'] _lowercase : Optional[int] = inputs['num_inference_steps'] _lowercase : str = inputs['output_type'] if "image" in inputs: _lowercase : List[Any] = inputs['image'] else: _lowercase : List[Any] = None if "mask_image" in inputs: _lowercase : Union[str, Any] = inputs['mask_image'] else: _lowercase : Dict = None if "original_image" in inputs: _lowercase : Any = inputs['original_image'] else: _lowercase : Tuple = None _lowercase , _lowercase : str = pipe.encode_prompt(lowerCamelCase) # inputs with prompt converted to embeddings _lowercase : Any = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: _lowercase : int = image if mask_image is not None: _lowercase : str = mask_image if original_image is not None: _lowercase : Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase) _lowercase : Dict = pipe(**lowerCamelCase)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase) _lowercase : Any = self.pipeline_class.from_pretrained(lowerCamelCase) pipe_loaded.to(lowerCamelCase) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase, lowerCamelCase) is None, F'''`{optional_component}` did not stay set to None after loading.''', ) _lowercase : Dict = self.get_dummy_inputs(lowerCamelCase) _lowercase : Optional[Any] = inputs['generator'] _lowercase : Any = inputs['num_inference_steps'] _lowercase : List[Any] = inputs['output_type'] # inputs with prompt converted to embeddings _lowercase : Optional[int] = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: _lowercase : str = image if mask_image is not None: _lowercase : Optional[int] = mask_image if original_image is not None: _lowercase : int = original_image _lowercase : str = pipe_loaded(**lowerCamelCase)[0] _lowercase : List[Any] = np.abs(to_np(lowerCamelCase) - to_np(lowerCamelCase)).max() self.assertLess(lowerCamelCase, 1E-4) def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : Optional[Any] = self.get_dummy_components() _lowercase : Any = self.pipeline_class(**lowerCamelCase) pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : List[str] = self.get_dummy_inputs(lowerCamelCase) _lowercase : Tuple = pipe(**lowerCamelCase)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase) _lowercase : List[str] = self.pipeline_class.from_pretrained(lowerCamelCase) 
pipe_loaded.to(lowerCamelCase) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests _lowercase : int = self.get_dummy_inputs(lowerCamelCase) _lowercase : Tuple = pipe_loaded(**lowerCamelCase)[0] _lowercase : str = np.abs(to_np(lowerCamelCase) - to_np(lowerCamelCase)).max() self.assertLess(lowerCamelCase, 1E-4)
```
style_context_codestyle: 84
label: 1

Row 7
code:
```python
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCamelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =UnCLIPImageVariationPipeline lowerCamelCase__ =IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'} lowerCamelCase__ =IMAGE_VARIATION_BATCH_PARAMS lowerCamelCase__ =[ 'generator', 'return_dict', 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] lowerCamelCase__ =False @property def __UpperCamelCase ( self : int ) -> List[Any]: """simple docstring""" return 32 @property def __UpperCamelCase ( self : Dict ) -> int: """simple docstring""" return 32 @property def __UpperCamelCase ( self : List[str] ) -> Dict: """simple docstring""" return self.time_input_dim @property def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return self.time_input_dim * 4 @property def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return 100 @property def __UpperCamelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(a ) @property def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : str = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(a ) @property def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } SCREAMING_SNAKE_CASE : Dict = UnCLIPTextProjModel(**a ) return model @property def __UpperCamelCase ( self : int ) -> str: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : str = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", 
"ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(**a ) return model @property def __UpperCamelCase ( self : Tuple ) -> int: """simple docstring""" return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def __UpperCamelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def __UpperCamelCase ( self : Any ) -> Optional[Any]: """simple docstring""" torch.manual_seed(1 ) SCREAMING_SNAKE_CASE : Tuple = UNetaDModel(**self.dummy_super_res_kwargs ) return model def __UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.dummy_decoder SCREAMING_SNAKE_CASE : List[str] = self.dummy_text_proj SCREAMING_SNAKE_CASE : int = self.dummy_text_encoder SCREAMING_SNAKE_CASE : int = self.dummy_tokenizer SCREAMING_SNAKE_CASE : str = self.dummy_super_res_first SCREAMING_SNAKE_CASE : List[Any] = self.dummy_super_res_last SCREAMING_SNAKE_CASE : str = UnCLIPScheduler( variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , ) SCREAMING_SNAKE_CASE : str = UnCLIPScheduler( variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , ) SCREAMING_SNAKE_CASE : List[str] = CLIPImageProcessor(crop_size=32 , size=32 ) SCREAMING_SNAKE_CASE : Dict = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def __UpperCamelCase ( self : Any , a : str , a : Union[str, Any]=0 , a : Tuple=True ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a ) if str(a ).startswith("mps" ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(a ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=a ).manual_seed(a ) if pil_image: SCREAMING_SNAKE_CASE : Dict = input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : List[Any] = input_image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.numpy_to_pil(a )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "cpu" SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : int = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : List[str] = 
self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Dict = pipe(**a ) SCREAMING_SNAKE_CASE : Any = output.images SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : int = pipe( **a , return_dict=a , )[0] SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [ 0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self : int ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = "cpu" SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : str = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Optional[int] = pipe(**a ) SCREAMING_SNAKE_CASE : Optional[Any] = output.images SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Dict = pipe( **a , return_dict=a , )[0] SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : int = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self : List[str] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : int = "cpu" SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : str = [ pipeline_inputs["image"], pipeline_inputs["image"], ] SCREAMING_SNAKE_CASE : Dict = pipe(**a ) SCREAMING_SNAKE_CASE : Optional[int] = output.images SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : str = [ tuple_pipeline_inputs["image"], tuple_pipeline_inputs["image"], ] SCREAMING_SNAKE_CASE : List[str] = pipe( **a , return_dict=a , )[0] SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) SCREAMING_SNAKE_CASE : List[Any] = np.array( [ 0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = torch.device("cpu" ) class _UpperCamelCase : '''simple docstring''' lowerCamelCase__ =1 SCREAMING_SNAKE_CASE : str = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**a ) SCREAMING_SNAKE_CASE : str = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : str = torch.Generator(device=a ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = pipe.decoder.dtype SCREAMING_SNAKE_CASE : 
List[str] = 1 SCREAMING_SNAKE_CASE : List[str] = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) SCREAMING_SNAKE_CASE : List[Any] = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) SCREAMING_SNAKE_CASE : List[str] = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) SCREAMING_SNAKE_CASE : int = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(a , pil_image=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe( **a , decoder_latents=a , super_res_latents=a ).images SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(a , pil_image=a ) # Don't pass image, instead pass embedding SCREAMING_SNAKE_CASE : List[str] = pipeline_inputs.pop("image" ) SCREAMING_SNAKE_CASE : str = pipe.image_encoder(a ).image_embeds SCREAMING_SNAKE_CASE : Optional[Any] = pipe( **a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1e-4 @skip_mps def __UpperCamelCase ( self : Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = torch_device == "cpu" # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor SCREAMING_SNAKE_CASE : List[Any] = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=a , expected_max_diff=a ) @skip_mps def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch_device == "cpu" SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[Any] = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , ) def __UpperCamelCase ( self : Union[str, Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes SCREAMING_SNAKE_CASE : List[str] = [2, 3] self._test_inference_batch_consistent( batch_sizes=a , additional_params_copy_to_batched_inputs=a , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=a ) @skip_mps def __UpperCamelCase ( self : Tuple ) -> Any: """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def __UpperCamelCase ( self : List[str] ) -> Dict: """simple docstring""" return super().test_save_load_local() @skip_mps def __UpperCamelCase ( self : Tuple ) -> Dict: """simple docstring""" return super().test_save_load_optional_components() @slow @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self : List[Any] ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" ) SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" 
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" ) SCREAMING_SNAKE_CASE : str = UnCLIPImageVariationPipeline.from_pretrained( "kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Tuple = pipeline.to(a ) pipeline.set_progress_bar_config(disable=a ) SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) SCREAMING_SNAKE_CASE : str = pipeline( a , generator=a , output_type="np" , ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(a , a , 15 )
```
code_codestyle: 76
style_context:
```python
from math import factorial


def lowerCamelCase__ ( _a , _a , _a):
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(_a , _a) or not isinstance(_a , _a):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    SCREAMING_SNAKE_CASE : int = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    SCREAMING_SNAKE_CASE : List[Any] = float(factorial(_a))
    coefficient /= factorial(_a) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
```
style_context_codestyle: 76
label: 1

Row 8
code:
```python
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : int = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( UpperCamelCase__): _lowercase : int = ["""pixel_values"""] def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> None: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) a__ : Dict =size if size is not None else {"shortest_edge": 2_2_4} a__ : int =get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) a__ : List[str] =crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} a__ : str =get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ , param_name="crop_size" ) a__ : Union[str, Any] =do_resize a__ : Tuple =size a__ : List[str] =resample a__ : int =do_center_crop a__ : Any =crop_size a__ : Tuple =do_rescale a__ : Optional[int] =rescale_factor a__ : str =do_normalize a__ : Optional[Any] =image_mean if image_mean is not None else OPENAI_CLIP_MEAN a__ : List[str] =image_std if image_std is not None else OPENAI_CLIP_STD a__ : str =do_convert_rgb def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray: '''simple docstring''' a__ : List[Any] =get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) a__ : Dict =get_resize_output_image_size(lowerCAmelCase__ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase__ ) return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray: '''simple docstring''' a__ : str =get_size_dict(lowerCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Dict: '''simple docstring''' return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray: '''simple docstring''' return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ) -> PIL.Image.Image: '''simple docstring''' a__ : List[str] =do_resize if do_resize is not None else self.do_resize a__ : Optional[Any] =size if size is not None else self.size a__ : int =get_size_dict(lowerCAmelCase__ , param_name="size" , default_to_square=lowerCAmelCase__ ) a__ : str =resample if resample is not None else self.resample a__ : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop a__ : Optional[Any] =crop_size if crop_size is not None else self.crop_size a__ : Any =get_size_dict(lowerCAmelCase__ , param_name="crop_size" , default_to_square=lowerCAmelCase__ ) a__ : Tuple =do_rescale if do_rescale is not None else self.do_rescale a__ : Optional[int] =rescale_factor if rescale_factor is not None else self.rescale_factor a__ : Union[str, Any] =do_normalize if do_normalize is not None else self.do_normalize a__ : int =image_mean if image_mean is not None else self.image_mean a__ : List[Any] =image_std if image_std is not None else self.image_std a__ : Tuple =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb a__ : Optional[int] =make_list_of_images(lowerCAmelCase__ ) if not valid_images(lowerCAmelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: a__ : List[str] =[convert_to_rgb(lowerCAmelCase__ ) for image in images] # All transformations expect numpy arrays. 
a__ : List[str] =[to_numpy_array(lowerCAmelCase__ ) for image in images] if do_resize: a__ : List[Any] =[self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images] if do_center_crop: a__ : Tuple =[self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images] if do_rescale: a__ : List[Any] =[self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images] if do_normalize: a__ : Optional[Any] =[self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images] a__ : Union[str, Any] =[to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images] a__ : List[str] ={"pixel_values": images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
```
code_codestyle: 148
style_context:
```python
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class __lowerCAmelCase ( UpperCamelCase__):
    _lowercase : Any = (PNDMScheduler,)
    _lowercase : str = (("""num_inference_steps""", 50),)

    def _lowercase ( self , **lowerCAmelCase__ ) -> int:
        '''simple docstring'''
        a__ : Dict = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**lowerCAmelCase__ )
        return config

    def _lowercase ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> List[Any]:
        '''simple docstring'''
        a__ : Optional[int] = dict(self.forward_default_kwargs )
        a__ : Tuple = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
        a__ : List[str] = self.dummy_sample
        a__ : List[str] = 0.1 * sample
        a__ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            a__ : int = self.get_scheduler_config(**lowerCAmelCase__ )
            a__ : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
            scheduler.set_timesteps(lowerCAmelCase__ )
            # copy over dummy past residuals
            a__ : Any = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCAmelCase__ )
                a__ : List[Any] = scheduler_class.from_pretrained(lowerCAmelCase__ )
                new_scheduler.set_timesteps(lowerCAmelCase__ )
                # copy over dummy past residuals
                a__ : str = dummy_past_residuals[:]

            a__ : Any = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            a__ : Dict = new_scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            a__ : Optional[int] = scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            a__ : Union[str, Any] = new_scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _lowercase ( self ) -> int:
        '''simple docstring'''
        pass

    def _lowercase ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Optional[Any]:
        '''simple docstring'''
        a__ : Optional[int] = dict(self.forward_default_kwargs )
        a__ : List[str] = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
        a__ : List[str] = self.dummy_sample
        a__ : int = 0.1 * sample
        a__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            a__ : Dict = self.get_scheduler_config()
            a__ : List[str] = scheduler_class(**lowerCAmelCase__ )
            scheduler.set_timesteps(lowerCAmelCase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            a__ : Dict = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCAmelCase__ )
                a__ : Dict = scheduler_class.from_pretrained(lowerCAmelCase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCAmelCase__ )
                # copy over dummy past residual (must be after setting timesteps)
                a__ : Optional[int] = dummy_past_residuals[:]

            a__ : Optional[Any] = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            a__ : List[Any] = new_scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            a__ : List[str] = scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            a__ : Any = new_scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _lowercase ( self , **lowerCAmelCase__ ) -> int:
        '''simple docstring'''
        a__ : Union[str, Any] = self.scheduler_classes[0]
        a__ : Optional[Any] = self.get_scheduler_config(**lowerCAmelCase__ )
        a__ : Any = scheduler_class(**lowerCAmelCase__ )
        a__ : int = 1_0
        a__ : Union[str, Any] = self.dummy_model()
        a__ : Optional[int] = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCAmelCase__ )

        for i, t in enumerate(scheduler.prk_timesteps ):
            a__ : List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : Optional[Any] = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps ):
            a__ : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
            a__ : int = scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample

        return sample

    def _lowercase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        a__ : str = dict(self.forward_default_kwargs )
        a__ : Tuple = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
        for scheduler_class in self.scheduler_classes:
            a__ : Union[str, Any] = self.get_scheduler_config()
            a__ : List[str] = scheduler_class(**lowerCAmelCase__ )
            a__ : List[Any] = self.dummy_sample
            a__ : Dict = 0.1 * sample

            if num_inference_steps is not None and hasattr(lowerCAmelCase__ , "set_timesteps" ):
                scheduler.set_timesteps(lowerCAmelCase__ )
            elif num_inference_steps is not None and not hasattr(lowerCAmelCase__ , "set_timesteps" ):
                a__ : int = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            a__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            a__ : str = dummy_past_residuals[:]

            a__ : List[Any] = scheduler.step_prk(lowerCAmelCase__ , 0 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            a__ : int = scheduler.step_prk(lowerCAmelCase__ , 1 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

            a__ : List[str] = scheduler.step_plms(lowerCAmelCase__ , 0 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            a__ : Dict = scheduler.step_plms(lowerCAmelCase__ , 1 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _lowercase ( self ) -> Tuple:
        '''simple docstring'''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )

    def _lowercase ( self ) -> Optional[Any]:
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowerCAmelCase__ )

        a__ : Optional[Any] = self.scheduler_classes[0]
        a__ : Tuple = self.get_scheduler_config(steps_offset=1 )
        a__ : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
        scheduler.set_timesteps(1_0 )
        assert torch.equal(
            scheduler.timesteps ,
            torch.LongTensor(
                [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) ,
        )

    def _lowercase ( self ) -> Tuple:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
            self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )

    def _lowercase ( self ) -> List[str]:
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCAmelCase__ )

    def _lowercase ( self ) -> List[str]:
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase__ )

    def _lowercase ( self ) -> Dict:
        '''simple docstring'''
        for t in [1, 5, 1_0]:
            self.check_over_forward(time_step=lowerCAmelCase__ )

    def _lowercase ( self ) -> List[Any]:
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
            self.check_over_forward(num_inference_steps=lowerCAmelCase__ )

    def _lowercase ( self ) -> str:
        '''simple docstring'''
        a__ : Dict = 2_7
        for scheduler_class in self.scheduler_classes:
            a__ : Tuple = self.dummy_sample
            a__ : Dict = 0.1 * sample
            a__ : Dict = self.get_scheduler_config()
            a__ : int = scheduler_class(**lowerCAmelCase__ )
            scheduler.set_timesteps(lowerCAmelCase__ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                a__ : Any = scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample

    def _lowercase ( self ) -> Optional[Any]:
        '''simple docstring'''
        with self.assertRaises(lowerCAmelCase__ ):
            a__ : List[Any] = self.scheduler_classes[0]
            a__ : Dict = self.get_scheduler_config()
            a__ : Tuple = scheduler_class(**lowerCAmelCase__ )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample

    def _lowercase ( self ) -> Optional[Any]:
        '''simple docstring'''
        a__ : List[Any] = self.full_loop()
        a__ : str = torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
        assert abs(result_mean.item() - 0.25_80 ) < 1E-3

    def _lowercase ( self ) -> str:
        '''simple docstring'''
        a__ : str = self.full_loop(prediction_type="v_prediction" )
        a__ : int = torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : Optional[int] = torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 67.39_86 ) < 1E-2
        assert abs(result_mean.item() - 0.08_78 ) < 1E-3

    def _lowercase ( self ) -> Optional[int]:
        '''simple docstring'''
        a__ : Tuple = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 )
        a__ : str = torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : Dict = torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
        assert abs(result_mean.item() - 0.29_95 ) < 1E-3

    def _lowercase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        a__ : Dict = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 )
        a__ : Union[str, Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
        a__ : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
        assert abs(result_mean.item() - 0.24_34 ) < 1E-3
148
1
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}


class lowerCAmelCase_ ( lowerCamelCase_ ):
    '''simple docstring'''

    lowerCAmelCase_ : Optional[int] = """deberta-v2"""

    def __init__( self : str , _UpperCAmelCase : Tuple=12_81_00 , _UpperCAmelCase : str=15_36 , _UpperCAmelCase : List[Any]=24 , _UpperCAmelCase : List[Any]=24 , _UpperCAmelCase : List[str]=61_44 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Any=5_12 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : str=1E-7 , _UpperCAmelCase : Any=False , _UpperCAmelCase : Tuple=-1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : int=True , _UpperCAmelCase : str=None , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Union[str, Any]="gelu" , **_UpperCAmelCase : str , ):
        """simple docstring"""
        super().__init__(**_UpperCAmelCase )

        UpperCAmelCase__ = hidden_size
        UpperCAmelCase__ = num_hidden_layers
        UpperCAmelCase__ = num_attention_heads
        UpperCAmelCase__ = intermediate_size
        UpperCAmelCase__ = hidden_act
        UpperCAmelCase__ = hidden_dropout_prob
        UpperCAmelCase__ = attention_probs_dropout_prob
        UpperCAmelCase__ = max_position_embeddings
        UpperCAmelCase__ = type_vocab_size
        UpperCAmelCase__ = initializer_range
        UpperCAmelCase__ = relative_attention
        UpperCAmelCase__ = max_relative_positions
        UpperCAmelCase__ = pad_token_id
        UpperCAmelCase__ = position_biased_input

        # Backwards compatibility
        if type(_UpperCAmelCase ) == str:
            UpperCAmelCase__ = [x.strip() for x in pos_att_type.lower().split("""|""" )]

        UpperCAmelCase__ = pos_att_type
        UpperCAmelCase__ = vocab_size
        UpperCAmelCase__ = layer_norm_eps

        UpperCAmelCase__ = kwargs.get("""pooler_hidden_size""" , _UpperCAmelCase )
        UpperCAmelCase__ = pooler_dropout
        UpperCAmelCase__ = pooler_hidden_act


class lowerCAmelCase_ ( lowerCamelCase_ ):
    '''simple docstring'''

    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        """simple docstring"""
        if self.task == "multiple-choice":
            UpperCAmelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            UpperCAmelCase__ = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)]
            )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    @property
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        """simple docstring"""
        return 12

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : "PreTrainedTokenizerBase" = None , ):
        """simple docstring"""
        UpperCAmelCase__ = super().generate_dummy_inputs(preprocessor=_UpperCAmelCase , framework=_UpperCAmelCase )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
346
'''simple docstring'''
from math import factorial


def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 20 ):
    '''simple docstring'''
    # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    UpperCAmelCase__ = 2 * n
    UpperCAmelCase__ = n // 2
    return int(factorial(SCREAMING_SNAKE_CASE__ ) / (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k )) )


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(2_0))
    else:
        try:
            UpperCAmelCase_ = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
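# Hedged aside, not part of the record above: the routine computes the number of
# lattice paths through an n x n grid, i.e. the central binomial coefficient
# C(2n, n). A minimal sketch of the same computation with readable names
# (the names below are assumptions, not taken from the record):
from math import comb, factorial

def central_binomial(n: int = 20) -> int:
    # C(2n, n) = (2n)! / (n! * n!), kept exact with integer division
    return factorial(2 * n) // (factorial(n) * factorial(n))

assert central_binomial(20) == comb(40, 20) == 137846528820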
346
1
"""simple docstring""" def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
352
def lowerCamelCase__ ( lowercase ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Dict = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
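# Hedged aside, not part of the record pair above: both variants test cubes via a
# floating-point cube root, which can misclassify very large cubes because
# n ** (1 / 3) is inexact. An exact integer variant, under an assumed name:
def perfect_cube_exact(n: int) -> bool:
    # round the float estimate, then confirm with exact integer arithmetic
    root = round(abs(n) ** (1 / 3))
    return root ** 3 == abs(n)

assert perfect_cube_exact(27) is True
assert perfect_cube_exact(4) is False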
319
0
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
)


@flax.struct.dataclass
class _a ( UpperCamelCase__ ):
    _lowercase : jnp.ndarray
    _lowercase : jnp.ndarray


class _a ( nn.Module ):
    _lowercase : int
    _lowercase : Tuple[int] = (16, 32, 96, 256)
    _lowercase : jnp.dtype = jnp.floataa

    def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
        """simple docstring"""
        lowercase__ = nn.Conv(
            self.block_out_channels[0] ,
            kernel_size=(3, 3) ,
            padding=((1, 1), (1, 1)) ,
            dtype=self.dtype ,
        )

        lowercase__ = []
        for i in range(len(self.block_out_channels ) - 1 ):
            lowercase__ = self.block_out_channels[i]
            lowercase__ = self.block_out_channels[i + 1]
            lowercase__ = nn.Conv(
                UpperCamelCase_ ,
                kernel_size=(3, 3) ,
                padding=((1, 1), (1, 1)) ,
                dtype=self.dtype ,
            )
            blocks.append(UpperCamelCase_ )
            lowercase__ = nn.Conv(
                UpperCamelCase_ ,
                kernel_size=(3, 3) ,
                strides=(2, 2) ,
                padding=((1, 1), (1, 1)) ,
                dtype=self.dtype ,
            )
            blocks.append(UpperCamelCase_ )
        lowercase__ = blocks

        lowercase__ = nn.Conv(
            self.conditioning_embedding_channels ,
            kernel_size=(3, 3) ,
            padding=((1, 1), (1, 1)) ,
            kernel_init=nn.initializers.zeros_init() ,
            bias_init=nn.initializers.zeros_init() ,
            dtype=self.dtype ,
        )

    def __call__( self: Optional[int] , UpperCamelCase_: Optional[int] ) -> List[Any]:
        """simple docstring"""
        lowercase__ = self.conv_in(UpperCamelCase_ )
        lowercase__ = nn.silu(UpperCamelCase_ )
        for block in self.blocks:
            lowercase__ = block(UpperCamelCase_ )
            lowercase__ = nn.silu(UpperCamelCase_ )
        lowercase__ = self.conv_out(UpperCamelCase_ )
        return embedding


@flax_register_to_config
class _a ( nn.Module , UpperCamelCase__ , UpperCamelCase__ ):
    _lowercase : int = 32
    _lowercase : int = 4
    _lowercase : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _lowercase : Union[bool, Tuple[bool]] = False
    _lowercase : Tuple[int] = (320, 640, 1280, 1280)
    _lowercase : int = 2
    _lowercase : Union[int, Tuple[int]] = 8
    _lowercase : Optional[Union[int, Tuple[int]]] = None
    _lowercase : int = 1280
    _lowercase : float = 0.0
    _lowercase : bool = False
    _lowercase : jnp.dtype = jnp.floataa
    _lowercase : bool = True
    _lowercase : int = 0
    _lowercase : str = "rgb"
    _lowercase : Tuple[int] = (16, 32, 96, 256)

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: jax.random.KeyArray ) -> FrozenDict:
        """simple docstring"""
        lowercase__ = (1, self.in_channels, self.sample_size, self.sample_size)
        lowercase__ = jnp.zeros(UpperCamelCase_ , dtype=jnp.floataa )
        lowercase__ = jnp.ones((1,) , dtype=jnp.intaa )
        lowercase__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        lowercase__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
        lowercase__ = jnp.zeros(UpperCamelCase_ , dtype=jnp.floataa )

        lowercase__ , lowercase__ = jax.random.split(UpperCamelCase_ )
        lowercase__ = {'''params''': params_rng, '''dropout''': dropout_rng}

        return self.init(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )["params"]

    def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
        """simple docstring"""
        lowercase__ = self.block_out_channels
        lowercase__ = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowercase__ = self.num_attention_heads or self.attention_head_dim

        # input
        lowercase__ = nn.Conv(
            block_out_channels[0] ,
            kernel_size=(3, 3) ,
            strides=(1, 1) ,
            padding=((1, 1), (1, 1)) ,
            dtype=self.dtype ,
        )

        # time
        lowercase__ = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift
        )
        lowercase__ = FlaxTimestepEmbedding(UpperCamelCase_ , dtype=self.dtype )

        lowercase__ = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] ,
            block_out_channels=self.conditioning_embedding_out_channels ,
        )

        lowercase__ = self.only_cross_attention
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            lowercase__ = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            lowercase__ = (num_attention_heads,) * len(self.down_block_types )

        # down
        lowercase__ = []
        lowercase__ = []

        lowercase__ = block_out_channels[0]
        lowercase__ = nn.Conv(
            UpperCamelCase_ ,
            kernel_size=(1, 1) ,
            padding='''VALID''' ,
            kernel_init=nn.initializers.zeros_init() ,
            bias_init=nn.initializers.zeros_init() ,
            dtype=self.dtype ,
        )
        controlnet_down_blocks.append(UpperCamelCase_ )

        for i, down_block_type in enumerate(self.down_block_types ):
            lowercase__ = output_channel
            lowercase__ = block_out_channels[i]
            lowercase__ = i == len(UpperCamelCase_ ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                lowercase__ = FlaxCrossAttnDownBlockaD(
                    in_channels=UpperCamelCase_ ,
                    out_channels=UpperCamelCase_ ,
                    dropout=self.dropout ,
                    num_layers=self.layers_per_block ,
                    num_attention_heads=num_attention_heads[i] ,
                    add_downsample=not is_final_block ,
                    use_linear_projection=self.use_linear_projection ,
                    only_cross_attention=only_cross_attention[i] ,
                    dtype=self.dtype ,
                )
            else:
                lowercase__ = FlaxDownBlockaD(
                    in_channels=UpperCamelCase_ ,
                    out_channels=UpperCamelCase_ ,
                    dropout=self.dropout ,
                    num_layers=self.layers_per_block ,
                    add_downsample=not is_final_block ,
                    dtype=self.dtype ,
                )
            down_blocks.append(UpperCamelCase_ )

            for _ in range(self.layers_per_block ):
                lowercase__ = nn.Conv(
                    UpperCamelCase_ ,
                    kernel_size=(1, 1) ,
                    padding='''VALID''' ,
                    kernel_init=nn.initializers.zeros_init() ,
                    bias_init=nn.initializers.zeros_init() ,
                    dtype=self.dtype ,
                )
                controlnet_down_blocks.append(UpperCamelCase_ )

            if not is_final_block:
                lowercase__ = nn.Conv(
                    UpperCamelCase_ ,
                    kernel_size=(1, 1) ,
                    padding='''VALID''' ,
                    kernel_init=nn.initializers.zeros_init() ,
                    bias_init=nn.initializers.zeros_init() ,
                    dtype=self.dtype ,
                )
                controlnet_down_blocks.append(UpperCamelCase_ )

        lowercase__ = down_blocks
        lowercase__ = controlnet_down_blocks

        # mid
        lowercase__ = block_out_channels[-1]
        lowercase__ = FlaxUNetMidBlockaDCrossAttn(
            in_channels=UpperCamelCase_ ,
            dropout=self.dropout ,
            num_attention_heads=num_attention_heads[-1] ,
            use_linear_projection=self.use_linear_projection ,
            dtype=self.dtype ,
        )

        lowercase__ = nn.Conv(
            UpperCamelCase_ ,
            kernel_size=(1, 1) ,
            padding='''VALID''' ,
            kernel_init=nn.initializers.zeros_init() ,
            bias_init=nn.initializers.zeros_init() ,
            dtype=self.dtype ,
        )

    def __call__( self: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: float = 1.0 , UpperCamelCase_: bool = True , UpperCamelCase_: bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        """simple docstring"""
        lowercase__ = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            lowercase__ = jnp.flip(UpperCamelCase_ , axis=1 )

        # 1. time
        if not isinstance(UpperCamelCase_ , jnp.ndarray ):
            lowercase__ = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(UpperCamelCase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowercase__ = timesteps.astype(dtype=jnp.floataa )
            lowercase__ = jnp.expand_dims(UpperCamelCase_ , 0 )
        lowercase__ = self.time_proj(UpperCamelCase_ )
        lowercase__ = self.time_embedding(UpperCamelCase_ )

        # 2. pre-process
        lowercase__ = jnp.transpose(UpperCamelCase_ , (0, 2, 3, 1) )
        lowercase__ = self.conv_in(UpperCamelCase_ )

        lowercase__ = jnp.transpose(UpperCamelCase_ , (0, 2, 3, 1) )
        lowercase__ = self.controlnet_cond_embedding(UpperCamelCase_ )
        sample += controlnet_cond

        # 3. down
        lowercase__ = (sample,)
        for down_block in self.down_blocks:
            if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
                lowercase__ , lowercase__ = down_block(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , deterministic=not train )
            else:
                lowercase__ , lowercase__ = down_block(UpperCamelCase_ , UpperCamelCase_ , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        lowercase__ = self.mid_block(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , deterministic=not train )

        # 5. controlnet blocks
        lowercase__ = ()
        for down_block_res_sample, controlnet_block in zip(UpperCamelCase_ , self.controlnet_down_blocks ):
            lowercase__ = controlnet_block(UpperCamelCase_ )
            controlnet_down_block_res_samples += (down_block_res_sample,)

        lowercase__ = controlnet_down_block_res_samples

        lowercase__ = self.controlnet_mid_block(UpperCamelCase_ )

        # 6. scaling
        lowercase__ = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=UpperCamelCase_ , mid_block_res_sample=UpperCamelCase_
        )
110
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


lowerCAmelCase = logging.get_logger(__name__)


def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """simple docstring"""
    try:
        with open(SCREAMING_SNAKE_CASE , '''rb''' ) as flax_state_f:
            lowercase__ = from_bytes(SCREAMING_SNAKE_CASE , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(SCREAMING_SNAKE_CASE ) as f:
                if f.read().startswith('''version''' ):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.'''
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )

    return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )


def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.'''
        )
        raise

    # check if we have bf16 weights
    lowercase__ = flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
    if any(SCREAMING_SNAKE_CASE ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.'''
        )
        lowercase__ = jax.tree_util.tree_map(
            lambda SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE
        )

    lowercase__ = ''''''
    lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE , sep='''.''' )
    lowercase__ = pt_model.state_dict()

    # keep track of unexpected & missing keys
    lowercase__ = []
    lowercase__ = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowercase__ = flax_key_tuple.split('''.''' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            lowercase__ = flax_key_tuple_array[:-1] + ['''weight''']
            lowercase__ = jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            lowercase__ = flax_key_tuple_array[:-1] + ['''weight''']
            lowercase__ = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            lowercase__ = flax_key_tuple_array[:-1] + ['''weight''']

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
                lowercase__ = (
                    flax_key_tuple_string.replace('''_0''' , '''.0''' )
                    .replace('''_1''' , '''.1''' )
                    .replace('''_2''' , '''.2''' )
                    .replace('''_3''' , '''.3''' )
                    .replace('''_4''' , '''.4''' )
                    .replace('''_5''' , '''.5''' )
                    .replace('''_6''' , '''.6''' )
                    .replace('''_7''' , '''.7''' )
                    .replace('''_8''' , '''.8''' )
                    .replace('''_9''' , '''.9''' )
                )

        lowercase__ = '''.'''.join(SCREAMING_SNAKE_CASE )

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                )
            else:
                # add weight to pytorch dict
                lowercase__ = np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
                lowercase__ = torch.from_numpy(SCREAMING_SNAKE_CASE )
                # remove from missing keys
                missing_keys.remove(SCREAMING_SNAKE_CASE )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(SCREAMING_SNAKE_CASE )

    pt_model.load_state_dict(SCREAMING_SNAKE_CASE )

    # re-transform missing_keys to list
    lowercase__ = list(SCREAMING_SNAKE_CASE )

    if len(SCREAMING_SNAKE_CASE ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).'''
        )

    if len(SCREAMING_SNAKE_CASE ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ''' use it for predictions and inference.'''
        )

    return pt_model
110
1
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCamelCase_ ( a_ ):
    SCREAMING_SNAKE_CASE_ = (DDPMScheduler,)

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **__lowerCamelCase : Tuple ):
        '''simple docstring'''
        a = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**__lowerCamelCase )
        return config

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        '''simple docstring'''
        self.check_over_configs(thresholding=__lowerCamelCase )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__lowerCamelCase ,
                    prediction_type=__lowerCamelCase ,
                    sample_max_value=__lowerCamelCase ,
                )

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config()
        a = scheduler_class(**__lowerCamelCase )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        '''simple docstring'''
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config()
        a = scheduler_class(**__lowerCamelCase )
        a = len(__lowerCamelCase )
        a = self.dummy_model()
        a = self.dummy_sample_deter
        a = torch.manual_seed(0 )

        for t in reversed(range(__lowerCamelCase ) ):
            # 1. predict noise residual
            a = model(__lowerCamelCase , __lowerCamelCase )
            # 2. predict previous mean of sample x_t-1
            a = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            a = pred_prev_sample

        a = torch.sum(torch.abs(__lowerCamelCase ) )
        a = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_sum.item() - 258.9_606 ) < 1e-2
        assert abs(result_mean.item() - 0.3_372 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config(prediction_type='''v_prediction''' )
        a = scheduler_class(**__lowerCamelCase )
        a = len(__lowerCamelCase )
        a = self.dummy_model()
        a = self.dummy_sample_deter
        a = torch.manual_seed(0 )

        for t in reversed(range(__lowerCamelCase ) ):
            # 1. predict noise residual
            a = model(__lowerCamelCase , __lowerCamelCase )
            # 2. predict previous mean of sample x_t-1
            a = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            a = pred_prev_sample

        a = torch.sum(torch.abs(__lowerCamelCase ) )
        a = torch.mean(torch.abs(__lowerCamelCase ) )
        assert abs(result_sum.item() - 202.0_296 ) < 1e-2
        assert abs(result_mean.item() - 0.2_631 ) < 1e-3

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        '''simple docstring'''
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config()
        a = scheduler_class(**__lowerCamelCase )
        a = [1_00, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=__lowerCamelCase )
        a = scheduler.timesteps
        for i, timestep in enumerate(__lowerCamelCase ):
            if i == len(__lowerCamelCase ) - 1:
                a = -1
            else:
                a = timesteps[i + 1]
            a = scheduler.previous_timestep(__lowerCamelCase )
            a = prev_t.item()
            self.assertEqual(__lowerCamelCase , __lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config()
        a = scheduler_class(**__lowerCamelCase )
        a = [1_00, 87, 50, 51, 0]
        with self.assertRaises(__lowerCamelCase , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        '''simple docstring'''
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config()
        a = scheduler_class(**__lowerCamelCase )
        a = [1_00, 87, 50, 1, 0]
        a = len(__lowerCamelCase )
        with self.assertRaises(__lowerCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config()
        a = scheduler_class(**__lowerCamelCase )
        a = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            __lowerCamelCase ,
            msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' ,
        ):
            scheduler.set_timesteps(timesteps=__lowerCamelCase )
352
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


UpperCamelCase__ : Union[str, Any] = 16
UpperCamelCase__ : Dict = 32


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ = 1_6 ) -> Tuple:
    """simple docstring"""
    a = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    a = load_dataset('''glue''', '''mrpc''' )

    def tokenize_function(snake_case_ ):
        # max_length=None => use the model max length (it's actually the default)
        a = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=snake_case_, max_length=snake_case_ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        a = datasets.map(
            snake_case_,
            batched=snake_case_,
            remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    a = tokenized_datasets.rename_column('''label''', '''labels''' )

    def collate_fn(snake_case_ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        a = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            a = 1_6
        elif accelerator.mixed_precision != "no":
            a = 8
        else:
            a = None

        return tokenizer.pad(
            snake_case_,
            padding='''longest''',
            max_length=snake_case_,
            pad_to_multiple_of=snake_case_,
            return_tensors='''pt''',
        )

    # Instantiate dataloaders.
    a = DataLoader(
        tokenized_datasets['''train'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_
    )
    a = DataLoader(
        tokenized_datasets['''validation'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    UpperCamelCase__ : int = mocked_dataloaders  # noqa: F811


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]:
    """simple docstring"""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', snake_case_ ) == "1":
        a = 2
    # Initialize accelerator
    a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    a = config['''lr''']
    a = int(config['''num_epochs'''] )
    a = int(config['''seed'''] )
    a = int(config['''batch_size'''] )
    a = evaluate.load('''glue''', '''mrpc''' )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=snake_case_ )
    def inner_training_loop(snake_case_ ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(snake_case_ )

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=snake_case_ )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        a = model.to(accelerator.device )

        # Instantiate optimizer
        a = AdamW(params=model.parameters(), lr=snake_case_ )
        a , a = get_dataloaders(snake_case_, snake_case_ )

        # Instantiate scheduler
        a = get_linear_schedule_with_warmup(
            optimizer=snake_case_,
            num_warmup_steps=1_0_0,
            num_training_steps=(len(snake_case_ ) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        a , a , a , a , a = accelerator.prepare(
            snake_case_, snake_case_, snake_case_, snake_case_, snake_case_
        )

        # Now we train the model
        for epoch in range(snake_case_ ):
            model.train()
            for step, batch in enumerate(snake_case_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                a = model(**snake_case_ )
                a = outputs.loss
                accelerator.backward(snake_case_ )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(snake_case_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    a = model(**snake_case_ )
                a = outputs.logits.argmax(dim=-1 )
                a , a = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=snake_case_,
                    references=snake_case_,
                )

            a = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", snake_case_ )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
    """simple docstring"""
    a = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''',
        type=snake_case_,
        default=snake_case_,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',
    )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
    a = parser.parse_args()
    a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
    training_function(snake_case_, snake_case_ )


if __name__ == "__main__":
    main()
330
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


__A = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
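# Hedged aside, not part of the record above: the record wires its import table
# into `_LazyModule` so heavy optional backends (torch, tf, flax) are imported
# only on first attribute access. A simplified stand-in for that pattern using
# only the standard library; the real `_LazyModule` also handles `__dir__`,
# copying, and submodule access, and the names below are assumptions.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value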
10
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase__ = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCamelCase ( datasets.BuilderConfig ): '''simple docstring''' a_ : int = 1_0000 a_ : Optional[List[str]] = None a_ : Optional[datasets.Features] = None class __lowerCamelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' a_ : Dict = ParquetConfig def lowerCamelCase ( self : Optional[Any] ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase ( self : Any , a_ : Optional[Any] ): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) lowerCAmelCase_ : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a_ , (str, list, tuple) ): lowerCAmelCase_ : str = data_files if isinstance(a_ , a_ ): lowerCAmelCase_ : Optional[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCAmelCase_ : str = [dl_manager.iter_files(a_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] lowerCAmelCase_ : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(a_ , a_ ): lowerCAmelCase_ : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCAmelCase_ : List[str] = [dl_manager.iter_files(a_ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a_ ): with open(a_ , "rb" ) as f: lowerCAmelCase_ : Dict = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) ) break splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={"files": files} ) ) return splits def lowerCamelCase ( self : int , a_ : pa.Table ): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase_ : Tuple = table_cast(a_ , self.info.features.arrow_schema ) return pa_table def lowerCamelCase ( self : Dict , a_ : Dict ): lowerCAmelCase_ : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ): with open(a_ , "rb" ) as f: lowerCAmelCase_ : Any = pq.ParquetFile(a_ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): lowerCAmelCase_ : List[str] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(a_ ) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error {type(a_ )}: {e}''' ) raise
241
0
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__A = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def __A ( _lowercase ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(_lowercase )


def __A ( _lowercase ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    _A = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(_lowercase , id=_lowercase )
359
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


__A = '3'

print('Python version:', sys.version)
print('transformers version:', transformers.__version__)

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)

try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)

try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
75
0
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Optional[Any]: super().__init__() if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( speech_model=__A , speech_processor=__A , vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , scheduler=__A , feature_extractor=__A , ) def __lowerCAmelCase ( self , __A = "auto" ) -> List[str]: if slice_size == "auto": lowerCAmelCase_ :Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__A ) def __lowerCAmelCase ( self ) -> Optional[int]: self.enable_attention_slicing(__A ) @torch.no_grad() def __call__( self , __A , __A=1_6000 , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , **__A , ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = self.speech_processor.feature_extractor( __A , return_tensors="""pt""" , sampling_rate=__A ).input_features.to(self.device ) lowerCAmelCase_ :List[Any] = self.speech_model.generate(__A , max_length=48_0000 ) lowerCAmelCase_ :Optional[int] = self.speech_processor.tokenizer.batch_decode(__A , skip_special_tokens=__A , normalize=__A )[ 0 ] if isinstance(__A , __A ): lowerCAmelCase_ :List[Any] = 1 elif isinstance(__A , __A ): lowerCAmelCase_ :Dict = len(__A ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__A )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__A )}.""" ) # get prompt text embeddings lowerCAmelCase_ :List[Any] = self.tokenizer( __A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) lowerCAmelCase_ :Tuple = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowerCAmelCase_ :Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) 
logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) lowerCAmelCase_ :int = text_input_ids[:, : self.tokenizer.model_max_length] lowerCAmelCase_ :int = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Dict = text_embeddings.shape lowerCAmelCase_ :Optional[Any] = text_embeddings.repeat(1 , __A , 1 ) lowerCAmelCase_ :str = text_embeddings.view(bs_embed * num_images_per_prompt , __A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowerCAmelCase_ :int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowerCAmelCase_ :List[str] if negative_prompt is None: lowerCAmelCase_ :Union[str, Any] = [""""""] * batch_size elif type(__A ) is not type(__A ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__A )} !=""" f""" {type(__A )}.""" ) elif isinstance(__A , __A ): lowerCAmelCase_ :Optional[int] = [negative_prompt] elif batch_size != len(__A ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__A )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""" ) else: lowerCAmelCase_ :Dict = negative_prompt lowerCAmelCase_ :List[str] = text_input_ids.shape[-1] lowerCAmelCase_ :Dict = self.tokenizer( __A , padding="""max_length""" , max_length=__A , truncation=__A , return_tensors="""pt""" , ) lowerCAmelCase_ :Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowerCAmelCase_ :Dict = uncond_embeddings.shape[1] lowerCAmelCase_ :Union[str, Any] = uncond_embeddings.repeat(1 , __A , 1 ) lowerCAmelCase_ :Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCAmelCase_ :List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
lowerCAmelCase_ :int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowerCAmelCase_ :Optional[int] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowerCAmelCase_ :Optional[Any] = torch.randn(__A , generator=__A , device="""cpu""" , dtype=__A ).to( self.device ) else: lowerCAmelCase_ :List[str] = torch.randn(__A , generator=__A , device=self.device , dtype=__A ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) lowerCAmelCase_ :Tuple = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowerCAmelCase_ :Optional[int] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCAmelCase_ :List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCAmelCase_ :List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCAmelCase_ :List[str] = {} if accepts_eta: lowerCAmelCase_ :Dict = eta for i, t in enumerate(self.progress_bar(__A ) ): # expand the latents if we are doing classifier free guidance lowerCAmelCase_ :int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCAmelCase_ :Dict = self.scheduler.scale_model_input(__A , __A ) # predict the noise residual lowerCAmelCase_ :Optional[int] = self.unet(__A , __A , encoder_hidden_states=__A ).sample # perform guidance if do_classifier_free_guidance: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = noise_pred.chunk(2 ) lowerCAmelCase_ :Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase_ :int = self.scheduler.step(__A , __A , __A , **__A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__A , __A , __A ) lowerCAmelCase_ :str = 1 / 0.1_8_2_1_5 * latents lowerCAmelCase_ :Union[str, Any] = self.vae.decode(__A ).sample lowerCAmelCase_ :List[str] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowerCAmelCase_ :int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCAmelCase_ :Dict = self.numpy_to_pil(__A ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
84
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0 ) -> str: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError("""Invalid input""" ) lowerCAmelCase_ :List[str] = 1_0**n lowerCAmelCase_ :int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(10) = }""")
84
1
"""simple docstring""" import random from typing import Any def _snake_case ( lowerCamelCase__ : List[str] ) -> list[Any]: for _ in range(len(__SCREAMING_SNAKE_CASE ) ): lowerCamelCase_ : List[Any] =random.randint(0 , len(__SCREAMING_SNAKE_CASE ) - 1 ) lowerCamelCase_ : Optional[Any] =random.randint(0 , len(__SCREAMING_SNAKE_CASE ) - 1 ) lowerCamelCase_ : Optional[int] =data[b], data[a] return data if __name__ == "__main__": A__ : Dict = [0, 1, 2, 3, 4, 5, 6, 7] A__ : Tuple = ['python', 'says', 'hello', '!'] print('Fisher-Yates Shuffle:') print('List', integers, strings) print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
370
"""simple docstring""" def _snake_case ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : list[int] ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: # Base Case if curr_ind == len(lowerCamelCase__ ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(lowerCamelCase__ ) ): if valid_connection(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Insert current vertex into path as next transition lowerCamelCase_ : Tuple =next_ver # Validate created path if util_hamilton_cycle(lowerCamelCase__ , lowerCamelCase__ , curr_ind + 1 ): return True # Backtrack lowerCamelCase_ : int =-1 return False def _snake_case ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int = 0 ) -> list[int]: lowerCamelCase_ : Optional[Any] =[-1] * (len(lowerCamelCase__ ) + 1) # initialize start and end of path with starting index lowerCamelCase_ : Optional[int] =start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(lowerCamelCase__ , lowerCamelCase__ , 1 ) else []
209
0
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    #
    # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
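# Non-interactive sketch of `floyd_warshall` above (assumes the same module,
# bypassing the input() prompts). The graph mirrors the "Example Input"
# walk-through in the trailing comments: 3 vertices, edge 1 -> 2 with weight 2
# and edge 2 -> 1 with weight 1.
INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
dist, _ = floyd_warshall(example_graph, 3)  # also prints the matrix shown above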
187
"""simple docstring""" def snake_case ( A__ ,A__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps UpperCAmelCase_ : Optional[int] = boundary[0] UpperCAmelCase_ : str = boundary[1] UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ ) UpperCAmelCase_ : List[str] = 0.0 y += (h / 2.0) * f(A__ ) for i in x_i: # print(i) y += h * f(A__ ) y += (h / 2.0) * f(A__ ) return y def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Union[str, Any] = a + h while x < (b - h): yield x UpperCAmelCase_ : Optional[Any] = x + h def snake_case ( A__ ): # enter your function here UpperCAmelCase_ : Dict = (x - 0) * (x - 0) return y def snake_case ( ): UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ ) print(F"""y = {y}""" ) if __name__ == "__main__": main()
268
0
"""simple docstring""" def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ) -> list: '''simple docstring''' __snake_case : str = word.split() def justify(UpperCAmelCase_ : list , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> str: __snake_case : Tuple = max_width - width __snake_case : List[str] = len(UpperCAmelCase_ ) if len(UpperCAmelCase_ ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: __snake_case : List[str] = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] __snake_case : Tuple = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] __snake_case : Tuple = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(UpperCAmelCase_ ): num_spaces_between_words_list[i] += 1 __snake_case : List[str] = [] for i in range(UpperCAmelCase_ ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ' ' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(UpperCAmelCase_ ) __snake_case : str = [] __snake_case : list[str] = [] __snake_case : Optional[int] = 0 for word in words: if width + len(UpperCAmelCase_ ) + len(UpperCAmelCase_ ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(UpperCAmelCase_ ) width += len(UpperCAmelCase_ ) else: # justify the line and add it to result answer.append(justify(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ) # reset new line and new width __snake_case : Dict = [word], len(UpperCAmelCase_ ) __snake_case : Dict = max_width - width - len(UpperCAmelCase_ ) answer.append(' '.join(UpperCAmelCase_ ) + (remaining_spaces + 1) * ' ' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
360
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _a : int= False class UpperCamelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def _lowercase (self : Optional[Any]) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : int) -> Tuple: __snake_case : str = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) __snake_case : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg') __snake_case : List[Any] = torch.manual_seed(0) __snake_case : Optional[int] = pipe.dual_guided( prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_A) __snake_case : Optional[Any] = VersatileDiffusionPipeline.from_pretrained(_A , torch_dtype=torch.floataa) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) __snake_case : Tuple = generator.manual_seed(0) __snake_case : int = pipe.dual_guided( prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass" def _lowercase (self : Optional[Any]) -> Optional[int]: __snake_case : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) __snake_case : Tuple = 'cyberpunk 2077' __snake_case : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg') __snake_case : str = torch.manual_seed(0) __snake_case : Union[str, Any] = pipe.dual_guided( prompt=_A , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images __snake_case : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) __snake_case : Tuple = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1 __snake_case : List[str] = 'A painting of a squirrel eating a burger ' __snake_case : str = torch.manual_seed(0) __snake_case : str = pipe.text_to_image( prompt=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy').images __snake_case : str = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) __snake_case : Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1 __snake_case : List[str] = pipe.image_variation(_A , generator=_A , output_type='numpy').images __snake_case : Dict = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) __snake_case : List[Any] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456]) assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1E-1
95
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
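# Observable effect of the _LazyModule wiring above: importing the package is
# cheap, and the framework-specific submodules load only on first attribute
# access. A hedged sketch (assumes transformers is installed):
import transformers

config_cls = transformers.VisionEncoderDecoderConfig  # resolved lazily here
print(config_cls.model_type)  # -> "vision-encoder-decoder"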
73
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets a ="""\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } """ a ="""\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. """ a =""" Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for 'cvit-mkb-clsr' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for 'cvit-mkb-clsr' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"precision\": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} """ def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: return float((preds == labels).mean() ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: __lowerCamelCase : Optional[Any] = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : Tuple = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) ) return { "accuracy": acc, "f1": fa, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: __lowerCamelCase : Any = np.array(lowerCamelCase__ ) __lowerCamelCase : List[Any] = np.array(lowerCamelCase__ ) __lowerCamelCase : Any = en_sentvecs.shape[0] # mean centering __lowerCamelCase : Union[str, Any] = en_sentvecs - np.mean(lowerCamelCase__ , axis=0 ) __lowerCamelCase : Dict = in_sentvecs - np.mean(lowerCamelCase__ , axis=0 ) __lowerCamelCase : Optional[int] = cdist(lowerCamelCase__ , lowerCamelCase__ , 'cosine' ) __lowerCamelCase : Optional[Any] = np.array(range(lowerCamelCase__ ) ) __lowerCamelCase : Dict = sim.argsort(axis=1 )[:, :1_0] __lowerCamelCase : Optional[int] = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): def lowerCAmelCase ( self : Optional[Any]): if self.config_name not in [ "wnli", "copa", "sna", "csqa", 
"wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]') return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('int64') if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32')), 'references': datasets.Value('int64') if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32')), }) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,) def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)} elif self.config_name in ["wiki-ner"]: return acc_and_fa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)} else: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]')
73
1
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __a ( nn.Module ): SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float = 0.0 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa def snake_case_ ( self ): _lowerCamelCase = [] _lowerCamelCase = [] for i in range(self.num_layers ): _lowerCamelCase = self.in_channels if i == 0 else self.out_channels _lowerCamelCase = FlaxResnetBlockaD( in_channels=a__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(a__ ) _lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(a__ ) _lowerCamelCase = resnets _lowerCamelCase = attentions if self.add_downsample: _lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , a__ , a__ , a__ , a__=True ): _lowerCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): _lowerCamelCase = resnet(a__ , a__ , deterministic=a__ ) _lowerCamelCase = attn(a__ , a__ , deterministic=a__ ) output_states += (hidden_states,) if self.add_downsample: _lowerCamelCase = self.downsamplers_a(a__ ) output_states += (hidden_states,) return hidden_states, output_states class __a ( nn.Module ): SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float = 0.0 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa def snake_case_ ( self ): _lowerCamelCase = [] for i in range(self.num_layers ): _lowerCamelCase = self.in_channels if i == 0 else self.out_channels _lowerCamelCase = FlaxResnetBlockaD( in_channels=a__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(a__ ) _lowerCamelCase = resnets if self.add_downsample: _lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , a__ , a__ , a__=True ): _lowerCamelCase = () for resnet in self.resnets: _lowerCamelCase = resnet(a__ , a__ , deterministic=a__ ) output_states += (hidden_states,) if self.add_downsample: _lowerCamelCase = self.downsamplers_a(a__ ) output_states += (hidden_states,) return hidden_states, output_states class __a ( nn.Module ): SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float = 0.0 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa def snake_case_ ( self ): _lowerCamelCase = [] _lowerCamelCase = [] for i in range(self.num_layers ): _lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels 
_lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(a__ ) _lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(a__ ) _lowerCamelCase = resnets _lowerCamelCase = attentions if self.add_upsample: _lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , a__ , a__ , a__ , a__ , a__=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _lowerCamelCase = res_hidden_states_tuple[-1] _lowerCamelCase = res_hidden_states_tuple[:-1] _lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _lowerCamelCase = resnet(a__ , a__ , deterministic=a__ ) _lowerCamelCase = attn(a__ , a__ , deterministic=a__ ) if self.add_upsample: _lowerCamelCase = self.upsamplers_a(a__ ) return hidden_states class __a ( nn.Module ): SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float = 0.0 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : bool = True SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa def snake_case_ ( self ): _lowerCamelCase = [] for i in range(self.num_layers ): _lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels _lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels _lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(a__ ) _lowerCamelCase = resnets if self.add_upsample: _lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , a__ , a__ , a__ , a__=True ): for resnet in self.resnets: # pop res hidden states _lowerCamelCase = res_hidden_states_tuple[-1] _lowerCamelCase = res_hidden_states_tuple[:-1] _lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _lowerCamelCase = resnet(a__ , a__ , deterministic=a__ ) if self.add_upsample: _lowerCamelCase = self.upsamplers_a(a__ ) return hidden_states class __a ( nn.Module ): SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float = 0.0 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : int = 1 SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : bool = False SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa def snake_case_ ( self ): # there is always at least one resnet _lowerCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _lowerCamelCase = [] for _ in range(self.num_layers ): _lowerCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(a__ ) _lowerCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(a__ ) 
_lowerCamelCase = resnets _lowerCamelCase = attentions def __call__( self , a__ , a__ , a__ , a__=True ): _lowerCamelCase = self.resnets[0](a__ , a__ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _lowerCamelCase = attn(a__ , a__ , deterministic=a__ ) _lowerCamelCase = resnet(a__ , a__ , deterministic=a__ ) return hidden_states
80
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_ : int =logging.get_logger(__name__) A_ : Tuple ={ """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""", } class __a ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE__ : int = "deta" SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , a__=None , a__=9_00 , a__=20_48 , a__=6 , a__=20_48 , a__=8 , a__=6 , a__=10_24 , a__=8 , a__=0.0 , a__=True , a__="relu" , a__=2_56 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.02 , a__=1.0 , a__=True , a__=False , a__="sine" , a__=5 , a__=4 , a__=4 , a__=True , a__=3_00 , a__=True , a__=True , a__=1 , a__=5 , a__=2 , a__=1 , a__=1 , a__=5 , a__=2 , a__=0.1 , a__=0.25 , **a__ , ): if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) _lowerCamelCase = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] ) else: if isinstance(a__ , a__ ): _lowerCamelCase = backbone_config.pop('model_type' ) _lowerCamelCase = CONFIG_MAPPING[backbone_model_type] _lowerCamelCase = config_class.from_dict(a__ ) _lowerCamelCase = backbone_config _lowerCamelCase = num_queries _lowerCamelCase = max_position_embeddings _lowerCamelCase = d_model _lowerCamelCase = encoder_ffn_dim _lowerCamelCase = encoder_layers _lowerCamelCase = encoder_attention_heads _lowerCamelCase = decoder_ffn_dim _lowerCamelCase = decoder_layers _lowerCamelCase = decoder_attention_heads _lowerCamelCase = dropout _lowerCamelCase = attention_dropout _lowerCamelCase = activation_dropout _lowerCamelCase = activation_function _lowerCamelCase = init_std _lowerCamelCase = init_xavier_std _lowerCamelCase = encoder_layerdrop _lowerCamelCase = auxiliary_loss _lowerCamelCase = position_embedding_type # deformable attributes _lowerCamelCase = num_feature_levels _lowerCamelCase = encoder_n_points _lowerCamelCase = decoder_n_points _lowerCamelCase = two_stage _lowerCamelCase = two_stage_num_proposals _lowerCamelCase = with_box_refine _lowerCamelCase = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' ) # Hungarian matcher _lowerCamelCase = class_cost _lowerCamelCase = bbox_cost _lowerCamelCase = giou_cost # Loss coefficients _lowerCamelCase = mask_loss_coefficient _lowerCamelCase = dice_loss_coefficient _lowerCamelCase = bbox_loss_coefficient _lowerCamelCase = giou_loss_coefficient _lowerCamelCase = eos_coefficient _lowerCamelCase = focal_alpha super().__init__(is_encoder_decoder=a__ , **a__ ) @property def snake_case_ ( self ): return self.encoder_attention_heads @property def snake_case_ ( self ): return self.d_model def snake_case_ ( self ): _lowerCamelCase = copy.deepcopy(self.__dict__ ) _lowerCamelCase = self.backbone_config.to_dict() _lowerCamelCase = self.__class__.model_type return output
80
1
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=3 , lowercase=None , lowercase=2 , ) -> Optional[int]: '''simple docstring''' a__ : List[Any] = parent a__ : str = batch_size a__ : Any = image_size a__ : int = patch_size a__ : Tuple = num_channels a__ : Optional[int] = is_training a__ : int = use_labels a__ : Union[str, Any] = hidden_size a__ : str = num_hidden_layers a__ : List[Any] = num_attention_heads a__ : Dict = intermediate_size a__ : Any = hidden_act a__ : Any = hidden_dropout_prob a__ : List[str] = attention_probs_dropout_prob a__ : Dict = type_sequence_label_size a__ : List[Any] = initializer_range a__ : Dict = scope a__ : List[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) a__ : Any = (image_size // patch_size) ** 2 a__ : Any = num_patches + 2 def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a__ : int = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__ : Tuple = self.get_config() return config, pixel_values, labels def __lowercase ( self) -> Dict: '''simple docstring''' return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowercase ( self , lowercase , lowercase , lowercase) -> int: '''simple docstring''' a__ : int = DeiTModel(config=lowercase) model.to(lowercase) model.eval() a__ : Union[str, Any] = model(lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __lowercase ( self , lowercase , lowercase , lowercase) -> str: '''simple docstring''' a__ : List[str] = DeiTForMaskedImageModeling(config=lowercase) model.to(lowercase) model.eval() a__ : 
List[Any] = model(lowercase) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images a__ : Tuple = 1 a__ : Optional[Any] = DeiTForMaskedImageModeling(lowercase) model.to(lowercase) model.eval() a__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) a__ : Dict = model(lowercase) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def __lowercase ( self , lowercase , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : int = self.type_sequence_label_size a__ : List[str] = DeiTForImageClassification(lowercase) model.to(lowercase) model.eval() a__ : Tuple = model(lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images a__ : Union[str, Any] = 1 a__ : Optional[Any] = DeiTForImageClassification(lowercase) model.to(lowercase) model.eval() a__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) a__ : str = model(lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def __lowercase ( self) -> Dict: '''simple docstring''' a__ : Tuple = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ) : Union[str, Any] = config_and_inputs a__ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Dict = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __A : str = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __A : List[str] = False __A : Union[str, Any] = False __A : Optional[Any] = False def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : List[str] = DeiTModelTester(self) a__ : str = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37) def __lowercase ( self) -> str: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def __lowercase ( self) -> Tuple: '''simple docstring''' pass def __lowercase ( self) -> List[str]: '''simple docstring''' a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Union[str, Any] = model_class(lowercase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) a__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , nn.Linear)) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Union[str, Any] = model_class(lowercase) a__ : List[str] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Union[str, Any] = [*signature.parameters.keys()] a__ : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase) def __lowercase ( self) -> int: '''simple docstring''' a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def 
__lowercase ( self) -> Any: '''simple docstring''' a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase) def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase) def __lowercase ( self , lowercase , lowercase , lowercase=False) -> Tuple: '''simple docstring''' a__ : Optional[int] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowercase ( self) -> List[Any]: '''simple docstring''' if not self.model_tester.is_training: return a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common() a__ : List[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue a__ : List[str] = model_class(lowercase) model.to(lowercase) model.train() a__ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase) a__ : str = model(**lowercase).loss loss.backward() def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[str] = False a__ : List[str] = True for model_class in self.all_model_classes: if model_class in get_values(lowercase) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue a__ : int = model_class(lowercase) model.gradient_checkpointing_enable() model.to(lowercase) model.train() a__ : List[Any] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase) a__ : List[Any] = model(**lowercase).loss loss.backward() def __lowercase ( self) -> Dict: '''simple docstring''' a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() a__ : Tuple = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase), *get_values(lowercase), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'): a__ : List[str] = problem_type['title'] a__ : Any = problem_type['num_labels'] a__ : Union[str, Any] = model_class(lowercase) model.to(lowercase) model.train() a__ : List[Any] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase) if problem_type["num_labels"] > 1: a__ : Any = inputs['labels'].unsqueeze(1).repeat(1 , problem_type['num_labels']) a__ : Tuple = inputs['labels'].to(problem_type['dtype']) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase) as warning_list: a__ : Optional[Any] = model(**lowercase).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}') loss.backward() @slow def __lowercase ( self) -> Any: '''simple docstring''' for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Any = DeiTModel.from_pretrained(lowercase) self.assertIsNotNone(lowercase) def A_ ( ) -> Any: a__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self) -> List[str]: '''simple docstring''' return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : Optional[Any] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to( lowercase) a__ : Optional[Any] = self.default_image_processor a__ : str = prepare_img() a__ : Optional[int] = image_processor(images=lowercase , return_tensors='pt').to(lowercase) # forward pass with torch.no_grad(): a__ : int = model(**lowercase) # verify the logits a__ : Tuple = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowercase) a__ : int = torch.tensor([-1.02_66, 0.19_12, -1.28_61]).to(lowercase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4)) @slow @require_accelerate @require_torch_gpu def __lowercase ( self) -> Any: '''simple docstring''' a__ : Dict = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto') a__ : Union[str, Any] = self.default_image_processor a__ : List[str] = prepare_img() a__ : int = image_processor(images=lowercase , return_tensors='pt') a__ : str = inputs.pixel_values.to(lowercase) # forward pass to make sure inference works in fp16 with torch.no_grad(): a__ : Any = model(lowercase)
99
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['DeiTFeatureExtractor'] __a = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
0
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
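# The mixin above only becomes concrete tests when paired with a real feature
# extractor. A hedged sketch using transformers' Wav2Vec2FeatureExtractor
# (chosen purely for illustration); `feat_extract_dict` supplies the kwargs the
# mixin passes to `self.feature_extraction_class(**self.feat_extract_dict)`.
import unittest

from transformers import Wav2Vec2FeatureExtractor


class Wav2Vec2SaveLoadTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}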
366
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
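# Sketch of driving the parser above programmatically instead of through the
# `accelerate config` CLI entry point; `--config_file` is the only option the
# parser defines:
if __name__ == "__main__":
    parser = config_command_parser()
    args = parser.parse_args(["--config_file", "my_config.yaml"])
    print(args.config_file)  # -> my_config.yaml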
305
0
"""Minimal CPU tests for accelerate's debug_launcher."""
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." 
, ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def 
lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
1
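# A minimal sketch of the gather-and-combine pattern used in the distributed
# eval script above, assuming each rank writes a JSON list of
# {"id": int, "pred": str} records (the file contents here are made up):
import json
from pathlib import Path
from tempfile import TemporaryDirectory

with TemporaryDirectory() as tmp:
    save_dir = Path(tmp)
    # Simulate two ranks saving partial, unordered results.
    (save_dir / "rank_0.json").write_text(json.dumps([{"id": 1, "pred": "b"}]))
    (save_dir / "rank_1.json").write_text(json.dumps([{"id": 0, "pred": "a"}]))
    partial_results = [json.loads(p.read_text()) for p in sorted(save_dir.glob("rank_*.json"))]
    records = [record for part in partial_results for record in part]
    preds = [record["pred"] for record in sorted(records, key=lambda record: record["id"])]
    assert preds == ["a", "b"]  # global order restored by example id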
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } A : List[str] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Any: for attribute in key.split('''.''' ): __a = getattr(a__ , a__ ) if weight_type is not None: __a = getattr(a__ , a__ ).shape else: __a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value elif weight_type == "running_mean": __a = value elif weight_type == "running_var": __a = value elif weight_type == "num_batches_tracked": __a = value elif weight_type == "inv_freq": __a = value else: __a = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __lowerCAmelCase ( a__ , a__ , a__ ) -> str: __a = [] __a = fairseq_model.state_dict() __a = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , ) __a = True else: for key, mapped_key in MAPPING.items(): __a = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __a = True if "*" in mapped_key: __a = name.split(a__ )[0].split('''.''' )[-2] __a = mapped_key.replace('''*''' , a__ ) if "pos_bias_u" in name: __a = None elif "pos_bias_v" in name: __a = None elif "weight_g" in name: __a = '''weight_g''' elif "weight_v" in name: __a = '''weight_v''' elif "bias" in name: __a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __a = '''weight''' elif "running_mean" in name: __a = '''running_mean''' elif "inv_freq" in name: __a = '''inv_freq''' elif "running_var" in name: __a = '''running_var''' elif "num_batches_tracked" in name: __a = '''num_batches_tracked''' else: __a = None set_recursively(a__ , a__ , a__ , a__ , a__ ) continue if not is_used: unused_weights.append(a__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Tuple: __a = full_name.split('''conv_layers.''' )[-1] __a = name.split('''.''' ) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(a__ ) @torch.no_grad() def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None , a__=True ) -> str: if config_path is not None: __a = WavaVecaConformerConfig.from_pretrained(a__ , hidden_act='''swish''' ) else: __a = WavaVecaConformerConfig() if "rope" in checkpoint_path: __a = '''rotary''' if is_finetuned: if 
dict_path: __a = Dictionary.load(a__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a = target_dict.pad_index __a = target_dict.bos_index __a = target_dict.eos_index __a = len(target_dict.symbols ) __a = os.path.join(a__ , '''vocab.json''' ) if not os.path.isdir(a__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a__ ) ) return os.makedirs(a__ , exist_ok=a__ ) __a = target_dict.indices # fairseq has the <pad> and <s> switched __a = 0 __a = 1 with open(a__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(a__ , a__ ) __a = WavaVecaCTCTokenizer( a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a__ , ) __a = True if config.feat_extract_norm == '''layer''' else False __a = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , ) __a = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ ) processor.save_pretrained(a__ ) __a = WavaVecaConformerForCTC(a__ ) else: __a = WavaVecaConformerForPreTraining(a__ ) if is_finetuned: __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __a = argparse.Namespace(task='''audio_pretraining''' ) __a = fairseq.tasks.setup_task(a__ ) __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a__ ) __a = model[0].eval() recursively_load_weights(a__ , a__ , not is_finetuned ) hf_wavavec.save_pretrained(a__ ) if __name__ == "__main__": A : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) A : Optional[Any] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
33
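# A small, standalone illustration of the "*" layer-index substitution driven
# by the MAPPING table above (expand_mapped_key is a hypothetical helper, not
# part of the conversion script):
def expand_mapped_key(fairseq_name: str, key: str, mapped_key: str) -> str:
    if "*" in mapped_key:
        # The layer index sits just before the matched key, e.g. "encoder.layers.3.<key>".
        layer_index = fairseq_name.split(key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key

assert expand_mapped_key(
    "encoder.layers.3.self_attn.linear_q.weight",
    "self_attn.linear_q",
    "encoder.layers.*.self_attn.linear_q",
) == "encoder.layers.3.self_attn.linear_q"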
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() A : str = logging.get_logger(__name__) A : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'encoder.layer_norm_for_extract': 'layer_norm_for_extract', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'label_embs_concat': 'label_embeddings_concat', 'mask_emb': 'masked_spec_embed', 'spk_proj': 'speaker_proj', } A : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'label_embeddings_concat', 'speaker_proj', 'layer_norm_for_extract', ] def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Dict: for attribute in key.split('''.''' ): __a = getattr(a__ , a__ ) if weight_type is not None: __a = getattr(a__ , a__ ).shape else: __a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __lowerCAmelCase ( a__ , a__ ) -> List[str]: __a = [] __a = fairseq_model.state_dict() __a = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , ) __a = True else: for key, mapped_key in MAPPING.items(): __a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __a = True if "*" in mapped_key: __a = name.split(a__ )[0].split('''.''' )[-2] __a = mapped_key.replace('''*''' , a__ ) if "weight_g" in name: __a = '''weight_g''' elif "weight_v" in name: __a = '''weight_v''' elif "bias" in name: __a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __a = '''weight''' else: __a = None set_recursively(a__ , a__ , a__ , a__ , a__ ) continue if not is_used: unused_weights.append(a__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> int: __a = full_name.split('''conv_layers.''' )[-1] __a = name.split('''.''' ) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __a = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(a__ ) @torch.no_grad() def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None , a__=True ) -> Tuple: if config_path is not None: __a = UniSpeechSatConfig.from_pretrained(a__ ) else: __a = UniSpeechSatConfig() __a = '''''' if is_finetuned: __a = UniSpeechSatForCTC(a__ ) else: __a = UniSpeechSatForPreTraining(a__ ) __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __a = model[0].eval() 
recursively_load_weights(a__ , a__ ) hf_wavavec.save_pretrained(a__ ) if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) A : Dict = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
33
1
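# The conv-layer routing in both conversion scripts parses fairseq names of the
# form "...conv_layers.<layer_id>.<type_id>...."; a hedged, standalone version
# of that parsing step (parse_conv_name is an illustrative name):
def parse_conv_name(full_name: str):
    name = full_name.split("conv_layers.")[-1]  # e.g. "0.0.weight"
    items = name.split(".")
    return int(items[0]), int(items[1])  # (layer_id, type_id)

assert parse_conv_name("feature_extractor.conv_layers.0.0.weight") == (0, 0)  # conv weight
assert parse_conv_name("feature_extractor.conv_layers.4.2.bias") == (4, 2)  # layer-norm bias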
"""simple docstring""" def A ( snake_case :int ) -> int: if a < 0: raise ValueError('Input value must be a positive integer' ) elif isinstance(snake_case , snake_case ): raise TypeError('Input value must be a \'int\' type' ) return bin(snake_case ).count('1' ) if __name__ == "__main__": import doctest doctest.testmod()
316
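# A quick sanity check of the popcount helper above: bin(13) == "0b1101", so
# three set bits are expected, and zero has none.
assert bin(13).count("1") == 3
assert bin(0).count("1") == 0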
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split() UpperCamelCase : Tuple = "|".join(sys.argv[1:]) UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''') UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
316
1
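# The same filtering logic as above, rerun on a fake diff listing instead of
# real git output (the file names are invented for the example):
import re

modified_files = [
    "src/transformers/models/bert/modeling_bert.py",
    "docs/index.md",
    "tests/test_modeling_bert.py",
]
joined_dirs = "|".join(["src", "tests"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
assert [f for f in modified_files if regex.match(f)] == [
    "src/transformers/models/bert/modeling_bert.py",
    "tests/test_modeling_bert.py",
]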
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} lowerCAmelCase_ = { """vocab_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), }, """merges_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), }, } lowerCAmelCase_ = { """allenai/longformer-base-4096""": 4096, """allenai/longformer-large-4096""": 4096, """allenai/longformer-large-4096-finetuned-triviaqa""": 4096, """allenai/longformer-base-4096-extra.pos.embd.only""": 4096, """allenai/longformer-large-4096-extra.pos.embd.only""": 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase_ ( )-> int: _snake_case : Dict = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case : Any = bs[:] _snake_case : int = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCAmelCase ) cs.append(2**8 + n ) n += 1 _snake_case : int = [chr(lowerCAmelCase ) for n in cs] return dict(zip(lowerCAmelCase , lowerCAmelCase ) ) def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> Tuple: _snake_case : Union[str, Any] = set() _snake_case : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case : List[str] = char return pairs class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : Optional[int] =VOCAB_FILES_NAMES a_ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP a_ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : Optional[Any] =["""input_ids""", """attention_mask"""] def __init__( self : Dict , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Any="replace" , UpperCamelCase : int="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : List[Any]="</s>" , UpperCamelCase : Optional[int]="<s>" , UpperCamelCase : Any="<unk>" , UpperCamelCase : Optional[int]="<pad>" , UpperCamelCase : Any="<mask>" , UpperCamelCase : Any=False , **UpperCamelCase : int , ): '''simple docstring''' _snake_case : Any = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token _snake_case : str = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token _snake_case : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token _snake_case : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token _snake_case : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token _snake_case : Tuple = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token super().__init__( errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , ) with open(UpperCamelCase , encoding='utf-8' ) as vocab_handle: _snake_case : str = json.load(UpperCamelCase ) _snake_case : str = {v: k for k, v in self.encoder.items()} _snake_case : str = errors # how to handle errors in decoding _snake_case : str = bytes_to_unicode() _snake_case : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase , encoding='utf-8' ) as merges_handle: _snake_case : int = merges_handle.read().split('\n' )[1:-1] _snake_case : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges] _snake_case : int = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) _snake_case : int = {} _snake_case : List[str] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case : Tuple = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return len(self.encoder ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : str ): '''simple docstring''' if token in self.cache: return self.cache[token] _snake_case : Tuple = tuple(UpperCamelCase ) _snake_case : int = get_pairs(UpperCamelCase ) if not pairs: return token while True: _snake_case : Any = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case : str = bigram _snake_case : str = [] _snake_case : Optional[Any] = 0 while i < len(UpperCamelCase ): try: _snake_case : Dict = word.index(UpperCamelCase , UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case : Dict = j if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case : str = tuple(UpperCamelCase ) _snake_case : List[Any] = new_word if len(UpperCamelCase ) == 1: break else: _snake_case : Dict = get_pairs(UpperCamelCase ) _snake_case : List[Any] = ' '.join(UpperCamelCase ) _snake_case : Tuple = word return word def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : List[str] ): '''simple docstring''' _snake_case : Optional[Any] = [] for token in re.findall(self.pat , UpperCamelCase ): _snake_case : Dict = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase ).split(' ' ) ) return bpe_tokens def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) ) def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Tuple ): '''simple docstring''' return self.decoder.get(UpperCamelCase ) def UpperCamelCase_ ( self 
: Optional[Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = ''.join(UpperCamelCase ) _snake_case : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _snake_case : Optional[int] = os.path.join( UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case : Optional[Any] = os.path.join( UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(UpperCamelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + '\n' ) _snake_case : Dict = 0 with open(UpperCamelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) _snake_case : Optional[int] = token_index writer.write(' '.join(UpperCamelCase ) + '\n' ) index += 1 return vocab_file, merge_file def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case : List[Any] = [self.cls_token_id] _snake_case : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1] def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' _snake_case : int = [self.sep_token_id] _snake_case : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self : Any , UpperCamelCase : List[str] , UpperCamelCase : Dict=False , **UpperCamelCase : Dict ): '''simple docstring''' _snake_case : Union[str, Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase ) > 0 and not text[0].isspace()): _snake_case : Optional[Any] = ' ' + text return (text, kwargs)
356
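# A toy walk-through of the two BPE primitives the tokenizer above relies on:
# pair extraction over a symbol tuple, and picking the lowest-ranked pair to
# merge next (the ranks here are invented for illustration):
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

word = ("h", "e", "l", "l", "o")
assert get_pairs(word) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}

bpe_ranks = {("l", "l"): 0, ("h", "e"): 1}  # lower rank = merged earlier
best_pair = min(get_pairs(word), key=lambda pair: bpe_ranks.get(pair, float("inf")))
assert best_pair == ("l", "l")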
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: Path , lowerCAmelCase: str = None , lowerCAmelCase: str = None , lowerCAmelCase: str = None , )-> List[Any]: if config_name_or_path is None: _snake_case : int = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base' if generator_tokenizer_name_or_path is None: _snake_case : Optional[int] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: _snake_case : List[str] = question_encoder_name_or_path _snake_case : List[str] = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration # Save model. _snake_case : Any = RagConfig.from_pretrained(lowerCAmelCase ) _snake_case : Tuple = AutoConfig.from_pretrained(lowerCAmelCase ) _snake_case : Any = AutoConfig.from_pretrained(lowerCAmelCase ) _snake_case : int = gen_config _snake_case : Tuple = question_encoder_config _snake_case : int = model_class.from_pretrained_question_encoder_generator( lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase ) rag_model.save_pretrained(lowerCAmelCase ) # Sanity check. model_class.from_pretrained(lowerCAmelCase ) # Save tokenizers. _snake_case : int = AutoTokenizer.from_pretrained(lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' ) _snake_case : str = AutoTokenizer.from_pretrained(lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
260
0
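# The consolidation above leans on RagConfig holding one sub-config per
# component; a hedged sketch of that composite-config shape using plain
# dataclasses (the real RagConfig carries many more fields):
from dataclasses import dataclass

@dataclass
class SubConfig:
    model_type: str

@dataclass
class CompositeConfig:
    question_encoder: SubConfig
    generator: SubConfig

cfg = CompositeConfig(question_encoder=SubConfig("dpr"), generator=SubConfig("bart"))
assert cfg.generator.model_type == "bart"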
"""simple docstring""" class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Any = None _lowerCamelCase : Dict = None _lowerCamelCase : List[str] = graph self._normalize_graph(lowercase , lowercase ) _lowerCamelCase : Optional[Any] = len(lowercase ) _lowerCamelCase : Optional[Any] = None def A_ ( self , lowercase , lowercase ): if sources is int: _lowerCamelCase : List[str] = [sources] if sinks is int: _lowerCamelCase : Dict = [sinks] if len(lowercase ) == 0 or len(lowercase ) == 0: return _lowerCamelCase : Union[str, Any] = sources[0] _lowerCamelCase : Tuple = sinks[0] # make fake vertex if there are more # than one source or sink if len(lowercase ) > 1 or len(lowercase ) > 1: _lowerCamelCase : Tuple = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _lowerCamelCase : Tuple = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _lowerCamelCase : List[Any] = max_input_flow _lowerCamelCase : Any = 0 _lowerCamelCase : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _lowerCamelCase : int = max_input_flow _lowerCamelCase : List[str] = size - 1 def A_ ( self ): if self.maximum_flow_algorithm is None: raise Exception('You need to set maximum flow algorithm before.' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def A_ ( self , lowercase ): _lowerCamelCase : List[Any] = algorithm(self ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : Any = flow_network _lowerCamelCase : Optional[int] = flow_network.verticesCount _lowerCamelCase : Tuple = flow_network.sourceIndex _lowerCamelCase : int = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _lowerCamelCase : Dict = flow_network.graph _lowerCamelCase : Tuple = False def A_ ( self ): if not self.executed: self._algorithm() _lowerCamelCase : Any = True def A_ ( self ): pass class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase ): super().__init__(lowercase ) # use this to save your result _lowerCamelCase : str = -1 def A_ ( self ): if not self.executed: raise Exception('You should execute algorithm before using its result!' 
) return self.maximum_flow class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase ): super().__init__(lowercase ) _lowerCamelCase : Dict = [[0] * self.verticies_count for i in range(self.verticies_count )] _lowerCamelCase : List[str] = [0] * self.verticies_count _lowerCamelCase : int = [0] * self.verticies_count def A_ ( self ): _lowerCamelCase : List[str] = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _lowerCamelCase : int = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _lowerCamelCase : int = 0 while i < len(lowercase ): _lowerCamelCase : Any = vertices_list[i] _lowerCamelCase : Union[str, Any] = self.heights[vertex_index] self.process_vertex(lowercase ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(lowercase ) ) _lowerCamelCase : List[Any] = 0 else: i += 1 _lowerCamelCase : Any = sum(self.preflow[self.source_index] ) def A_ ( self , lowercase ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(lowercase , lowercase ) self.relabel(lowercase ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Tuple = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def A_ ( self , lowercase ): _lowerCamelCase : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _lowerCamelCase : Any = self.heights[to_index] if min_height is not None: _lowerCamelCase : Optional[Any] = min_height + 1 if __name__ == "__main__": lowercase__ = [0] lowercase__ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase__ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase__ = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
96
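# An independent cross-check of the push-relabel demo above: a short BFS-based
# augmenting-path (Edmonds-Karp) solver run on the same 4-node capacity matrix
# should agree on the maximum flow.
from collections import deque

def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        # BFS for an augmenting path in the residual graph.
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return total
        # Find the bottleneck along the path, then augment.
        v, bottleneck = sink, float("inf")
        while v != source:
            bottleneck = min(bottleneck, capacity[parent[v]][v] - flow[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            flow[parent[v]][v] += bottleneck
            flow[v][parent[v]] -= bottleneck
            v = parent[v]
        total += bottleneck

capacity = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
assert edmonds_karp(capacity, 0, 3) == 6  # bottleneck of the 0 -> 1 -> 2 -> 3 chain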
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = f'''{sampling_rate}''' _lowerCamelCase : str = '1' _lowerCamelCase : str = 'f32le' _lowerCamelCase : Union[str, Any] = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowercase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _lowerCamelCase : str = ffmpeg_process.communicate(lowercase__ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error _lowerCamelCase : List[Any] = output_stream[0] _lowerCamelCase : Tuple = np.frombuffer(lowercase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def _snake_case ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ): _lowerCamelCase : Optional[Any] = f'''{sampling_rate}''' _lowerCamelCase : List[str] = '1' if format_for_conversion == "s16le": _lowerCamelCase : List[str] = 2 elif format_for_conversion == "f32le": _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) _lowerCamelCase : Dict = platform.system() if system == "Linux": _lowerCamelCase : Optional[int] = 'alsa' _lowerCamelCase : Optional[Any] = 'default' elif system == "Darwin": _lowerCamelCase : Optional[int] = 'avfoundation' _lowerCamelCase : Any = ':0' elif system == "Windows": _lowerCamelCase : Tuple = 'dshow' _lowerCamelCase : Tuple = 'default' _lowerCamelCase : Optional[int] = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] _lowerCamelCase : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _lowerCamelCase : List[Any] = _ffmpeg_stream(lowercase__ , lowercase__ ) for item in iterator: yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ): if stream_chunk_s is not None: _lowerCamelCase : int = stream_chunk_s else: _lowerCamelCase : Optional[Any] = chunk_length_s _lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowercase__ , lowercase__ , format_for_conversion=lowercase__ ) if format_for_conversion == "s16le": _lowerCamelCase : List[str] = np.intaa _lowerCamelCase : str = 2 elif format_for_conversion == "f32le": _lowerCamelCase : Any = np.floataa _lowerCamelCase : List[Any] = 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: _lowerCamelCase : Union[str, Any] = chunk_length_s / 6 _lowerCamelCase : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowercase__ , (int, float) ): _lowerCamelCase : Any = [stride_length_s, stride_length_s] _lowerCamelCase : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _lowerCamelCase : List[Any] = datetime.datetime.now() _lowerCamelCase : Optional[int] = datetime.timedelta(seconds=lowercase__ ) for item in chunk_bytes_iter(lowercase__ , lowercase__ , stride=(stride_left, stride_right) , stream=lowercase__ ): # Put everything back in numpy scale _lowerCamelCase : List[Any] = np.frombuffer(item['raw'] , dtype=lowercase__ ) _lowerCamelCase : int = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) _lowerCamelCase : Optional[int] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ): _lowerCamelCase : int = B'' _lowerCamelCase, _lowerCamelCase : Dict = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) _lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowercase__ ) < chunk_len: _lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowercase__ ) >= chunk_len: # We are flushing the accumulator _lowerCamelCase : str = (_stride_left, stride_right) _lowerCamelCase : str = {'raw': acc[:chunk_len], 'stride': stride} if stream: _lowerCamelCase : List[Any] = False yield item _lowerCamelCase : Optional[Any] = stride_left _lowerCamelCase : str = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowercase__ ) > stride_left: _lowerCamelCase : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)} if stream: _lowerCamelCase : Tuple = False yield item def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : int = 2**24 # 16Mo try: with subprocess.Popen(lowercase__ , stdout=subprocess.PIPE , bufsize=lowercase__ ) as ffmpeg_process: while True: _lowerCamelCase : Optional[Any] = ffmpeg_process.stdout.read(lowercase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
96
1
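# A tiny rerun of the chunk/stride bookkeeping above on fake bytes: windows of
# chunk_len bytes that overlap their neighbours by (stride_left, stride_right),
# with the first window getting a left stride of 0 (chunk_bytes is an
# illustrative stand-in, not the streaming generator itself):
def chunk_bytes(data: bytes, chunk_len: int, stride):
    stride_left, stride_right = stride
    step = chunk_len - stride_left - stride_right
    chunks, start, left = [], 0, 0
    while start + chunk_len <= len(data):
        chunks.append((data[start : start + chunk_len], (left, stride_right)))
        start += step
        left = stride_left
    return chunks

assert chunk_bytes(b"abcdefgh", chunk_len=4, stride=(1, 1)) == [
    (b"abcd", (0, 1)),
    (b"cdef", (1, 1)),
    (b"efgh", (1, 1)),
]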
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _A : List[Any] =logging.getLogger(__name__) _A : int ='''Hello world! cécé herlolip''' _A : Any =namedtuple( '''BertAbsConfig''', [ '''temp_dir''', '''large''', '''use_bert_emb''', '''finetune_bert''', '''encoder''', '''share_emb''', '''max_pos''', '''enc_layers''', '''enc_hidden_size''', '''enc_heads''', '''enc_ff_size''', '''enc_dropout''', '''dec_layers''', '''dec_hidden_size''', '''dec_heads''', '''dec_ff_size''', '''dec_dropout''', ], ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: lowerCamelCase__ : List[Any] = BertAbsConfig( temp_dir=""".""" , finetune_bert=UpperCamelCase , large=UpperCamelCase , share_emb=UpperCamelCase , use_bert_emb=UpperCamelCase , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , ) lowerCamelCase__ : Tuple = torch.load(UpperCamelCase , lambda UpperCamelCase , UpperCamelCase : storage ) lowerCamelCase__ : Optional[Any] = AbsSummarizer(UpperCamelCase , torch.device("""cpu""" ) , UpperCamelCase ) original.eval() lowerCamelCase__ : Dict = BertAbsSummarizer(UpperCamelCase , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outputs are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) lowerCamelCase__ : str = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs lowerCamelCase__ : Any = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCamelCase )) ) lowerCamelCase__ : Dict = torch.tensor(UpperCamelCase ).unsqueeze(0 ) lowerCamelCase__ : List[str] = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCamelCase )) ) lowerCamelCase__ : str = torch.tensor(UpperCamelCase ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass lowerCamelCase__ : List[Any] = encoder_input_ids lowerCamelCase__ : Dict = decoder_input_ids lowerCamelCase__ : Union[str, Any] = None lowerCamelCase__ : int = None lowerCamelCase__ : Dict = None lowerCamelCase__ : Union[str, Any] = None lowerCamelCase__ : str = None # The original model does not apply the generator layer immediately but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical lowerCamelCase__ : Dict = original(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )[0] lowerCamelCase__ : Any = original.generator(UpperCamelCase ) lowerCamelCase__ : List[Any] = new_model( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )[0] lowerCamelCase__ : int = new_model.generator(UpperCamelCase ) lowerCamelCase__ : Optional[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference between outputs: {:.2f}""".format(UpperCamelCase ) ) lowerCamelCase__ : int = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference between outputs: {:.2f}""".format(UpperCamelCase ) ) lowerCamelCase__ : List[Any] = torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) if are_identical: logging.info("""all outputs are equal up to 1e-3""" ) else: raise ValueError("""the outputs are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": _A : Dict =argparse.ArgumentParser() parser.add_argument( '''--bertabs_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official PyTorch dump.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''', ) _A : int =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
129
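# The conversion script above follows a generic recipe: copy the state dict,
# then assert output parity on a shared input. A minimal hedged version of
# that recipe with two identical toy modules:
import torch
from torch import nn

torch.manual_seed(0)
original = nn.Linear(4, 2)
converted = nn.Linear(4, 2)
converted.load_state_dict(original.state_dict())  # the weight transfer step

x = torch.randn(3, 4)
assert torch.allclose(original(x), converted(x), atol=1e-3)  # the parity check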
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter _A : List[Any] =True except ImportError: _A : int =False _A : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple: return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class _lowercase ( _lowercase ): @staticmethod def lowerCamelCase_ ( UpperCamelCase__: ArgumentParser ): lowerCamelCase__ : List[str] = parser.add_parser("""add-new-model""" ) add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" ) add_new_model_parser.add_argument("""--testing_file""" , type=UpperCamelCase__ , help="""Configuration file on which to run.""" ) add_new_model_parser.add_argument( """--path""" , type=UpperCamelCase__ , help="""Path to cookiecutter. Should only be used for testing purposes.""" ) add_new_model_parser.set_defaults(func=UpperCamelCase__ ) def __init__( self: Optional[int] , UpperCamelCase__: bool , UpperCamelCase__: str , UpperCamelCase__: str=None , *UpperCamelCase__: Optional[int] ): lowerCamelCase__ : List[Any] = testing lowerCamelCase__ : Tuple = testing_file lowerCamelCase__ : int = path def lowerCamelCase_ ( self: int ): warnings.warn( """The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """ """It is not actively maintained anymore, so might give a result that won't pass all tests and quality """ """checks, you should use `transformers-cli add-new-model-like` instead.""" ) if not _has_cookiecutter: raise ImportError( """Model creation dependencies are required to use the `add_new_model` command. Install them by running """ """the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory lowerCamelCase__ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]] if len(UpperCamelCase__ ) > 0: raise ValueError( """Several directories starting with `cookiecutter-template-` in current working directory. 
""" """Please clean your directory by removing all folders starting with `cookiecutter-template-` or """ """change your working directory.""" ) lowerCamelCase__ : int = ( Path(UpperCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) lowerCamelCase__ : int = path_to_transformer_root / """templates""" / """adding_a_new_model""" # Execute cookiecutter if not self._testing: cookiecutter(str(UpperCamelCase__ ) ) else: with open(self._testing_file , """r""" ) as configuration_file: lowerCamelCase__ : List[str] = json.load(UpperCamelCase__ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase__ , extra_context=UpperCamelCase__ , ) lowerCamelCase__ : Optional[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0] # Retrieve configuration with open(directory + """/configuration.json""" , """r""" ) as configuration_file: lowerCamelCase__ : int = json.load(UpperCamelCase__ ) lowerCamelCase__ : Tuple = configuration["""lowercase_modelname"""] lowerCamelCase__ : int = configuration["""generate_tensorflow_pytorch_and_flax"""] os.remove(F'''{directory}/configuration.json''' ) lowerCamelCase__ : Union[str, Any] = """PyTorch""" in generate_tensorflow_pytorch_and_flax lowerCamelCase__ : Union[str, Any] = """TensorFlow""" in generate_tensorflow_pytorch_and_flax lowerCamelCase__ : Tuple = """Flax""" in generate_tensorflow_pytorch_and_flax lowerCamelCase__ : List[str] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=UpperCamelCase__ ) # Tests require submodules as they have parent imports with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ): pass shutil.move( F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , ) shutil.move( F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(UpperCamelCase__: Optional[int] ): with open(UpperCamelCase__ , """r""" ) as f: lowerCamelCase__ : Union[str, Any] = f.readlines() with open(UpperCamelCase__ , """w""" ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(UpperCamelCase__ ) if output_pytorch: if not self._testing: remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: List[str] ): # Create temp file lowerCamelCase__ , lowerCamelCase__ : Any = mkstemp() lowerCamelCase__ : Tuple = False with fdopen(UpperCamelCase__ , """w""" ) as new_file: with open(UpperCamelCase__ ) as old_file: for line in old_file: new_file.write(UpperCamelCase__ ) if line_to_copy_below in line: lowerCamelCase__ : int = True for line_to_copy in lines_to_copy: new_file.write(UpperCamelCase__ ) if not line_found: raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(UpperCamelCase__ , UpperCamelCase__ ) # Remove original file remove(UpperCamelCase__ ) # Move new file move(UpperCamelCase__ , UpperCamelCase__ ) def skip_units(UpperCamelCase__: Optional[int] ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(UpperCamelCase__: List[str] ): with open(UpperCamelCase__ ) as datafile: lowerCamelCase__ : int = [] lowerCamelCase__ : Tuple = False lowerCamelCase__ : int = False for line in 
datafile: if "# To replace in: " in line and "##" not in line: lowerCamelCase__ : List[str] = line.split("""\"""" )[1] lowerCamelCase__ : List[str] = skip_units(UpperCamelCase__ ) elif "# Below: " in line and "##" not in line: lowerCamelCase__ : List[Any] = line.split("""\"""" )[1] lowerCamelCase__ : Union[str, Any] = skip_units(UpperCamelCase__ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ : Union[str, Any] = [] elif "# Replace with" in line and "##" not in line: lowerCamelCase__ : str = [] elif "##" not in line: lines_to_copy.append(UpperCamelCase__ ) remove(UpperCamelCase__ ) replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(UpperCamelCase__ )
129
1
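The `replace` helper in the row above rewrites a file in place by streaming it through a temporary file and then swapping the files. A minimal self-contained sketch of the same pattern, with a hypothetical `replace_below` name and a simplified marker-based API:

```python
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp


def replace_below(path: str, marker: str, new_lines: list) -> None:
    """Insert `new_lines` after every line containing `marker`, atomically-ish."""
    fd, tmp_path = mkstemp()
    with fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line)
            if marker in line:
                new_file.writelines(new_lines)
    copymode(path, tmp_path)  # keep the original file's permission bits
    remove(path)
    move(tmp_path, path)
```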
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig lowercase : str = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring lowercase : Optional[Any] = 'UperNetConfig' class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 1 , ) -> None: """simple docstring""" super().__init__() A : Optional[int] = nn.Convad( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE , ) A : List[Any] = nn.BatchNormad(SCREAMING_SNAKE_CASE ) A : List[str] = nn.ReLU() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> torch.Tensor: """simple docstring""" A : Tuple = self.conv(SCREAMING_SNAKE_CASE ) A : Optional[int] = self.batch_norm(SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.activation(SCREAMING_SNAKE_CASE ) return output class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" super().__init__() A : str = [ nn.AdaptiveAvgPoolad(SCREAMING_SNAKE_CASE ), UperNetConvModule(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> torch.Tensor: """simple docstring""" A : Any = input for layer in self.layers: A : Union[str, Any] = layer(SCREAMING_SNAKE_CASE ) return hidden_state class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" super().__init__() A : Dict = pool_scales A : Union[str, Any] = align_corners A : int = in_channels A : List[str] = channels A : List[Any] = [] for i, pool_scale in enumerate(SCREAMING_SNAKE_CASE ): A : str = UperNetPyramidPoolingBlock(pool_scale=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , channels=SCREAMING_SNAKE_CASE ) self.blocks.append(SCREAMING_SNAKE_CASE ) self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[torch.Tensor]: """simple docstring""" A : int = [] for ppm in self.blocks: A : Tuple = ppm(SCREAMING_SNAKE_CASE ) A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners ) ppm_outs.append(SCREAMING_SNAKE_CASE ) return ppm_outs class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__() A : List[str] = config A : Union[str, Any] = config.pool_scales # e.g. 
(1, 2, 3, 6) A : Optional[int] = in_channels A : List[str] = config.hidden_size A : str = False A : str = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module A : Optional[int] = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) A : Union[str, Any] = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module A : Dict = nn.ModuleList() A : List[str] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer A : int = UperNetConvModule(SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 ) A : Union[str, Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(SCREAMING_SNAKE_CASE ) self.fpn_convs.append(SCREAMING_SNAKE_CASE ) A : Dict = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self.apply(self._init_weights ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Union[str, Any] = inputs[-1] A : Optional[int] = [x] psp_outs.extend(self.psp_modules(SCREAMING_SNAKE_CASE ) ) A : str = torch.cat(SCREAMING_SNAKE_CASE , dim=1 ) A : str = self.bottleneck(SCREAMING_SNAKE_CASE ) return output def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> torch.Tensor: """simple docstring""" A : Union[str, Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(SCREAMING_SNAKE_CASE ) ) # build top-down path A : int = len(SCREAMING_SNAKE_CASE ) for i in range(used_backbone_levels - 1 , 0 , -1 ): A : Union[str, Any] = laterals[i - 1].shape[2:] A : Optional[Any] = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners ) # build outputs A : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): A : Any = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners ) A : Tuple = torch.cat(SCREAMING_SNAKE_CASE , dim=1 ) A : Any = self.fpn_bottleneck(SCREAMING_SNAKE_CASE ) A : Optional[int] = self.classifier(SCREAMING_SNAKE_CASE ) return output class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = 1 ) -> None: """simple docstring""" super().__init__() A : List[Any] = config A : Optional[Any] = config.auxiliary_in_channels A : Optional[Any] = config.auxiliary_channels A : Union[str, Any] = config.auxiliary_num_convs A : List[str] = config.auxiliary_concat_input A : Optional[Any] = in_index A : str = (kernel_size // 2) * dilation A : Tuple = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( 
self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) ) if self.num_convs == 0: A : Optional[Any] = nn.Identity() else: A : Tuple = nn.Sequential(*SCREAMING_SNAKE_CASE ) if self.concat_input: A : Any = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=kernel_size // 2 ) A : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.apply(self._init_weights ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> torch.Tensor: """simple docstring""" A : Union[str, Any] = encoder_hidden_states[self.in_index] A : Optional[Any] = self.convs(SCREAMING_SNAKE_CASE ) if self.concat_input: A : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) A : str = self.classifier(SCREAMING_SNAKE_CASE ) return output class A ( __snake_case ): __magic_name__ = UperNetConfig __magic_name__ = '''pixel_values''' __magic_name__ = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = value lowercase : List[str] = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase : Union[str, Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( '''UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.''' , __snake_case , ) class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : List[Any] = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) A : List[Any] = UperNetHead(SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels ) A : str = UperNetFCNHead(SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ) -> Union[tuple, SemanticSegmenterOutput]: """simple docstring""" A : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict A : Optional[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A : Any = output_attentions if output_attentions is not None else self.config.output_attentions A : Optional[int] = self.backbone.forward_with_filtered_kwargs( SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE ) A : int = outputs.feature_maps A : Any = self.decode_head(SCREAMING_SNAKE_CASE ) A : str = nn.functional.interpolate(SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE ) A : List[Any] = None if self.auxiliary_head is not None: A : List[str] = self.auxiliary_head(SCREAMING_SNAKE_CASE ) A : Any = nn.functional.interpolate( SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE ) A : Optional[Any] = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss A : Optional[int] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) A : str = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[int] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: A : Optional[Any] = (logits,) + outputs[1:] else: A : Tuple = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
3
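The UperNet decode head above builds a PSP-style pyramid pooling module: pool the feature map to a fixed grid, project it with a 1x1 convolution, and bilinearly upsample back to the input size. A minimal sketch of one such block, assuming standard torch APIs (the real module stacks several pool scales and concatenates the results):

```python
import torch
from torch import nn


class PoolingBlock(nn.Module):
    """Pool to a fixed scale, project with a 1x1 conv, then upsample back."""

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(pool_scale)
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.conv(self.pool(x))
        # restore the spatial size so the output can be concatenated with `x`
        return nn.functional.interpolate(
            out, size=x.shape[2:], mode="bilinear", align_corners=False
        )
```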
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _A = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = XLNetTokenizer SCREAMING_SNAKE_CASE = XLNetTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True def _a (self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ : List[str] = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def _a (self ): """simple docstring""" UpperCAmelCase__ : str = """<s>""" UpperCAmelCase__ : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(_lowerCamelCase ) , 1006 ) def _a (self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) UpperCAmelCase__ : List[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] ) UpperCAmelCase__ : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase__ : List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) UpperCAmelCase__ : int = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _a (self ): """simple docstring""" UpperCAmelCase__ : List[str] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase ) UpperCAmelCase__ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + 
"""b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase ) UpperCAmelCase__ : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def _a (self ): """simple docstring""" UpperCAmelCase__ : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) UpperCAmelCase__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase ) UpperCAmelCase__ : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase ) UpperCAmelCase__ : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) UpperCAmelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
171
0
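The integration test above checks XLNet's sequence-pair layout, where the special tokens (`<sep>` = 4, `<cls>` = 3) are appended rather than prepended. A hedged usage sketch, assuming the `transformers` and `sentencepiece` packages and access to the `xlnet-base-cased` checkpoint:

```python
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
pair = tokenizer.encode("multi-sequence build", add_special_tokens=False)

# XLNet appends <sep>/<cls> (ids 4 and 3) instead of prepending them
assert tokenizer.build_inputs_with_special_tokens(text) == text + [4, 3]
assert tokenizer.build_inputs_with_special_tokens(text, pair) == text + [4] + pair + [4, 3]
```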
"""simple docstring""" import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor lowercase__ = logging.get_logger(__name__) class __snake_case ( lowercase_ ): def __init__( self , *lowercase , **lowercase) -> None: '''simple docstring''' warnings.warn( 'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PerceiverImageProcessor instead.' , a__ , ) super().__init__(*a__ , **a__)
354
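The shim above exists only to emit a deprecation warning before delegating to its replacement class. A self-contained sketch of the pattern with hypothetical `OldExtractor`/`NewProcessor` names, including a check that the warning actually fires:

```python
import warnings


class NewProcessor:
    """Stand-in for the replacement class (hypothetical)."""


class OldExtractor(NewProcessor):
    """Deprecated alias that forwards everything to the new class."""

    def __init__(self) -> None:
        warnings.warn("OldExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__()


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)
```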
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple: _enforce_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if n == 0: return 0 a__: List[Any] = float('-inf' ) for i in range(1 , n + 1 ): a__: Optional[Any] = max( _SCREAMING_SNAKE_CASE , prices[i - 1] + naive_cut_rod_recursive(n - i , _SCREAMING_SNAKE_CASE ) ) return max_revue def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: _enforce_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: str = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: a__: Dict = float('-inf' ) for i in range(1 , n + 1 ): a__: Optional[Any] = max( _SCREAMING_SNAKE_CASE , prices[i - 1] + _top_down_cut_rod_recursive(n - i , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , ) a__: Optional[int] = max_revenue return max_rev[n] def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict: _enforce_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. a__: str = [float('-inf' ) for _ in range(n + 1 )] a__: Tuple = 0 for i in range(1 , n + 1 ): a__: List[str] = max_rev[i] for j in range(1 , i + 1 ): a__: Tuple = max(_SCREAMING_SNAKE_CASE , prices[j - 1] + max_rev[i - j] ) a__: Union[str, Any] = max_revenue_i return max_rev[n] def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any: if n < 0: a__: Optional[int] = F'n must be greater than or equal to 0. Got n = {n}' raise ValueError(_SCREAMING_SNAKE_CASE ) if n > len(_SCREAMING_SNAKE_CASE ): a__: List[str] = ( 'Each integral piece of rod must have a corresponding price. ' F'Got n = {n} but length of prices = {len(_SCREAMING_SNAKE_CASE )}' ) raise ValueError(_SCREAMING_SNAKE_CASE ) def __a ( ) ->str: a__: int = [6, 10, 12, 15, 20, 23] a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. a__: Any = 36 a__: Optional[int] = top_down_cut_rod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: List[Any] = bottom_up_cut_rod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: int = naive_cut_rod_recursive(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
203
0
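The top-down variant above threads a `max_rev` memo list by hand. An alternative sketch that lets `functools.lru_cache` manage the memo table instead; `cut_rod_cached` is an illustrative name, not part of the snippet:

```python
from functools import lru_cache


def cut_rod_cached(n: int, prices: tuple) -> int:
    """Same recurrence as top_down_cut_rod, with functools handling the cache."""

    @lru_cache(maxsize=None)
    def best(k: int) -> int:
        if k == 0:
            return 0
        return max(prices[i - 1] + best(k - i) for i in range(1, k + 1))

    return best(n)


assert cut_rod_cached(6, (6, 10, 12, 15, 20, 23)) == 36
```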
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowercase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name lowercase : Optional[int] = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class A__ ( __UpperCAmelCase ): """simple docstring""" __A : Union[PIL.Image.Image, np.ndarray] class A__ ( __UpperCAmelCase ): """simple docstring""" def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]: '''simple docstring''' super().__init__() self.register_modules( prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , ) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any: '''simple docstring''' if latents is None: a__ : List[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}') a__ : List[str] = latents.to(lowercase) a__ : Dict = latents * scheduler.init_noise_sigma return latents def __lowercase ( self , lowercase=0) -> Optional[int]: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`') a__ : Any = torch.device(F'cuda:{gpu_id}') a__ : Optional[Any] = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase , lowercase) @property def __lowercase ( self) -> Any: '''simple docstring''' if self.device != torch.device('meta') or not hasattr(self.image_encoder , '_hf_hook'): return self.device for module in self.image_encoder.modules(): if ( hasattr(lowercase , '_hf_hook') and hasattr(module._hf_hook , 'execution_device') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , ) -> Any: '''simple docstring''' if isinstance(lowercase , lowercase) and isinstance(image[0] , torch.Tensor): a__ : Dict = torch.cat(lowercase , axis=0) if image[0].ndim == 4 else torch.stack(lowercase , axis=0) if not isinstance(lowercase , torch.Tensor): a__ : List[Any] = self.image_processor(lowercase , return_tensors='pt').pixel_values[0].unsqueeze(0) a__ : Optional[Any] = 
image.to(dtype=self.image_encoder.dtype , device=lowercase) a__ : int = self.image_encoder(lowercase)['last_hidden_state'] a__ : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 a__ : Optional[int] = image_embeds.repeat_interleave(lowercase , dim=0) if do_classifier_free_guidance: a__ : Tuple = torch.zeros_like(lowercase) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a__ : int = torch.cat([negative_image_embeds, image_embeds]) return image_embeds @torch.no_grad() @replace_example_docstring(lowercase) def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Tuple: '''simple docstring''' if isinstance(lowercase , PIL.Image.Image): a__ : List[str] = 1 elif isinstance(lowercase , torch.Tensor): a__ : List[str] = image.shape[0] elif isinstance(lowercase , lowercase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)): a__ : List[str] = len(lowercase) else: raise ValueError( F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase)}') a__ : Tuple = self._execution_device a__ : List[Any] = batch_size * num_images_per_prompt a__ : Optional[Any] = guidance_scale > 1.0 a__ : Optional[int] = self._encode_image(lowercase , lowercase , lowercase , lowercase) # prior self.scheduler.set_timesteps(lowercase , device=lowercase) a__ : str = self.scheduler.timesteps a__ : Tuple = self.prior.config.num_embeddings a__ : Optional[int] = self.prior.config.embedding_dim a__ : Dict = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim a__ : Tuple = latents.reshape(latents.shape[0] , lowercase , lowercase) for i, t in enumerate(self.progress_bar(lowercase)): # expand the latents if we are doing classifier free guidance a__ : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents a__ : Optional[int] = self.scheduler.scale_model_input(lowercase , lowercase) a__ : Tuple = self.prior( lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding # remove the variance a__ , a__ : Any = noise_pred.split( scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: a__ , a__ : Any = noise_pred.chunk(2) a__ : str = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) a__ : List[str] = self.scheduler.step( lowercase , timestep=lowercase , sample=lowercase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=lowercase) a__ : List[Any] = [] for i, latent in enumerate(lowercase): print() a__ : Dict = self.renderer.decode( latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(lowercase) a__ : Union[str, Any] = torch.stack(lowercase) if output_type not in ["np", "pil"]: raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}') a__ : List[Any] = images.cpu().numpy() if output_type == "pil": a__ : Any = [self.numpy_to_pil(lowercase) for image in images] # Offload 
last model to CPU if hasattr(self , 'final_offload_hook') and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=lowercase)
99
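The denoising loop above doubles the batch, then blends the unconditional and conditional predictions using the guidance scale. A small sketch of that classifier-free guidance step in isolation, assuming the doubled-batch convention the pipeline uses:

```python
import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """Split a doubled batch into (unconditional, conditional) halves and blend."""
    uncond, cond = noise_pred.chunk(2)
    return uncond + guidance_scale * (cond - uncond)


# doubled batch of 2 samples, as produced by torch.cat([latents] * 2)
pred = torch.randn(4, 8)
guided = apply_cfg(pred, guidance_scale=3.0)
assert guided.shape == (2, 8)
```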
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def lowerCamelCase__ ( a , a , a=8 ) -> List[Any]: _A: int = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _A: str = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def lowerCamelCase__ ( a , a=5_12 , a=5_12 ) -> Dict: _A: Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) _A: Tuple = np.array(pil_image.convert('''RGB''' ) ) _A: List[str] = arr.astype(np.floataa ) / 127.5 - 1 _A: Tuple = np.transpose(a , [2, 0, 1] ) _A: Any = torch.from_numpy(a ).unsqueeze(0 ) return image class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self : int , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : VQModel , ): """simple docstring""" super().__init__() self.register_modules( unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , movq=lowerCAmelCase_ , ) _A: List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ): """simple docstring""" # get the original timestep using init_timestep _A: Union[str, Any] = min(int(num_inference_steps * strength ) , lowerCAmelCase_ ) _A: str = max(num_inference_steps - init_timestep , 0 ) _A: str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=None ): """simple docstring""" if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is 
{type(lowerCAmelCase_ )}""" ) _A: Optional[int] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) _A: Union[str, Any] = batch_size * num_images_per_prompt if image.shape[1] == 4: _A: Optional[int] = image else: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A: List[Any] = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ ) ] _A: Optional[Any] = torch.cat(lowerCAmelCase_ , dim=0 ) else: _A: Optional[int] = self.movq.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ ) _A: int = self.movq.config.scaling_factor * init_latents _A: Optional[Any] = torch.cat([init_latents] , dim=0 ) _A: Any = init_latents.shape _A: Optional[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ) # get latents _A: Union[str, Any] = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _A: List[str] = init_latents return latents def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int]=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) _A: Any = torch.device(F"""cuda:{gpu_id}""" ) _A: int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ ) def __magic_name__ ( self : Any , lowerCAmelCase_ : Any=0 ): """simple docstring""" if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) _A: Any = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=lowerCAmelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _A: int = None for cpu_offloaded_model in [self.unet, self.movq]: _A , _A: List[Any] = cpu_offload_with_hook(lowerCAmelCase_ , lowerCAmelCase_ , prev_module_hook=lowerCAmelCase_ ) # We'll offload the last model manually. 
_A: Tuple = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __magic_name__ ( self : List[Any] ): """simple docstring""" if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCAmelCase_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCAmelCase_ ) def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : float = 0.3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ): """simple docstring""" _A: Any = self._execution_device _A: Any = guidance_scale > 1.0 if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A: Any = torch.cat(lowerCAmelCase_ , dim=0 ) _A: int = image_embeds.shape[0] if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A: Dict = torch.cat(lowerCAmelCase_ , dim=0 ) if do_classifier_free_guidance: _A: Any = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 ) _A: str = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 ) _A: Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase_ ) if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A: List[str] = [image] if not all(isinstance(lowerCAmelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F"""Input is in incorrect format: {[type(lowerCAmelCase_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) _A: List[str] = torch.cat([prepare_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in image] , dim=0 ) _A: Tuple = image.to(dtype=image_embeds.dtype , device=lowerCAmelCase_ ) _A: Optional[Any] = self.movq.encode(lowerCAmelCase_ )['''latents'''] _A: Optional[int] = latents.repeat_interleave(lowerCAmelCase_ , dim=0 ) self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ ) _A , _A: List[Any] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _A: Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt ) _A , _A: Optional[int] = downscale_height_and_width(lowerCAmelCase_ , lowerCAmelCase_ , self.movq_scale_factor ) _A: Any = self.prepare_latents( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ ) for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ): # expand the latents if we are doing classifier free guidance _A: Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A: str = {'''image_embeds''': image_embeds} _A: Optional[int] = self.unet( sample=lowerCAmelCase_ , timestep=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , added_cond_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0] if do_classifier_free_guidance: _A , _A: str = noise_pred.split(latents.shape[1] , dim=1 ) _A , _A: int = noise_pred.chunk(2 ) _A , _A: int = variance_pred.chunk(2 ) _A: Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _A: List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _A , _A: Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _A: Any = self.scheduler.step( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ , )[0] # post-processing _A: Tuple = self.movq.decode(lowerCAmelCase_ , force_not_quantize=lowerCAmelCase_ )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: _A: int = image * 0.5 + 0.5 _A: Any = image.clamp(0 , 1 ) _A: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _A: Union[str, Any] = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_ )
121
0
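`get_timesteps` in the pipeline above implements the usual img2img trick: a `strength` of 0.2 keeps only the last 20% of the noise schedule, so the output stays close to the input image. A standalone sketch of the same arithmetic on a toy schedule:

```python
import torch


def get_timesteps(timesteps: torch.Tensor, num_inference_steps: int, strength: float):
    """Keep only the final `strength` fraction of the schedule, as in img2img."""
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start


timesteps = torch.arange(100, 0, -1)  # a toy descending schedule
kept, n = get_timesteps(timesteps, num_inference_steps=100, strength=0.2)
assert n == 20 and len(kept) == 20
```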
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def A_ ( _lowerCAmelCase ) -> List[str]: return 1.0 / (1.0 + np.exp(-_outputs )) def A_ ( _lowerCAmelCase ) -> Optional[Any]: UpperCamelCase : List[Any] = np.max(_outputs , axis=-1 , keepdims=a_ ) UpperCamelCase : Tuple = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=a_ ) class A__ ( __snake_case ): _UpperCAmelCase :str = '''sigmoid''' _UpperCAmelCase :Optional[Any] = '''softmax''' _UpperCAmelCase :int = '''none''' @add_end_docstrings( __snake_case , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , ) class A__ ( __snake_case ): _UpperCAmelCase :Dict = False _UpperCAmelCase :int = ClassificationFunction.NONE def __init__( self , **A_ ): '''simple docstring''' super().__init__(**lowerCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __UpperCamelCase( self , A_=None , A_=None , A_="" , **A_ ): '''simple docstring''' UpperCamelCase : Any = tokenizer_kwargs UpperCamelCase : Any = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: UpperCamelCase : Any = self.model.config.return_all_scores if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or top_k is None: UpperCamelCase : int = top_k UpperCamelCase : Optional[Any] = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , lowerCAmelCase__ , ) if return_all_scores: UpperCamelCase : Dict = None else: UpperCamelCase : Dict = 1 if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCamelCase : List[Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: UpperCamelCase : Dict = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *A_ , **A_ ): '''simple docstring''' UpperCamelCase : Any = super().__call__(*lowerCAmelCase__ , **lowerCAmelCase__ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
UpperCamelCase : Union[str, Any] = "top_k" not in kwargs if isinstance(args[0] , lowerCAmelCase__ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __UpperCamelCase( self , A_ , **A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.framework if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): return self.tokenizer(**lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) == 1 and isinstance(inputs[0] , lowerCAmelCase__ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." ) return self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' return self.model(**lowerCAmelCase__ ) def __UpperCamelCase( self , A_ , A_=None , A_=1 , A_=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: UpperCamelCase : Tuple = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: UpperCamelCase : Tuple = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: UpperCamelCase : Dict = self.model.config.function_to_apply else: UpperCamelCase : Union[str, Any] = ClassificationFunction.NONE UpperCamelCase : Optional[int] = model_outputs["logits"][0] UpperCamelCase : Any = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: UpperCamelCase : List[Any] = sigmoid(lowerCAmelCase__ ) elif function_to_apply == ClassificationFunction.SOFTMAX: UpperCamelCase : str = softmax(lowerCAmelCase__ ) elif function_to_apply == ClassificationFunction.NONE: UpperCamelCase : Any = outputs else: raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} UpperCamelCase : str = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(lowerCAmelCase__ ) ] if not _legacy: dict_scores.sort(key=lambda A_ : x["score"] , reverse=lowerCAmelCase__ ) if top_k is not None: UpperCamelCase : Dict = dict_scores[:top_k] return dict_scores
367
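The pipeline above post-processes logits with a max-shifted softmax so that `exp()` cannot overflow on large values. A minimal NumPy sketch of both activation helpers:

```python
import numpy as np


def sigmoid(logits: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-logits))


def softmax(logits: np.ndarray) -> np.ndarray:
    # subtracting the row-wise max keeps exp() numerically stable
    shifted = np.exp(logits - logits.max(axis=-1, keepdims=True))
    return shifted / shifted.sum(axis=-1, keepdims=True)


scores = softmax(np.array([[1000.0, 1001.0]]))  # no overflow despite huge logits
assert np.allclose(scores.sum(axis=-1), 1.0)
```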
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
140
0
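The `__init__` above defers heavy framework imports through `_LazyModule`. A rough sketch of the same idea using a PEP 562 module-level `__getattr__` instead of transformers' helper; this is an alternative technique, not the library's actual implementation:

```python
import importlib

# map submodule name -> public symbols it provides
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}


def __getattr__(name: str):
    """Resolve a public symbol to its submodule on first access (PEP 562)."""
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```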
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = fname.split(os.path.sep )[-1] return re.search(r"""^(.*)_\d+\.jpg$""" , lowerCamelCase__ ).groups()[0] class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None): __SCREAMING_SNAKE_CASE = file_names __SCREAMING_SNAKE_CASE = image_transform __SCREAMING_SNAKE_CASE = label_to_id def __len__( self): return len(self.file_names) def __getitem__( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = self.file_names[idx] __SCREAMING_SNAKE_CASE = PIL.Image.open(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = raw_image.convert("""RGB""") if self.image_transform is not None: __SCREAMING_SNAKE_CASE = self.image_transform(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = extract_label(lowerCAmelCase__) if self.label_to_id is not None: __SCREAMING_SNAKE_CASE = self.label_to_id[label] return {"image": image, "label": label} def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): if args.with_tracking: __SCREAMING_SNAKE_CASE = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: __SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] ) __SCREAMING_SNAKE_CASE = int(config["""seed"""] ) __SCREAMING_SNAKE_CASE = int(config["""batch_size"""] ) __SCREAMING_SNAKE_CASE = config["image_size"] if not isinstance(lowerCamelCase__ , (list, tuple) ): __SCREAMING_SNAKE_CASE = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , """isdigit""" ): if args.checkpointing_steps == "epoch": __SCREAMING_SNAKE_CASE = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): __SCREAMING_SNAKE_CASE = int(args.checkpointing_steps ) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." ) else: __SCREAMING_SNAKE_CASE = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: __SCREAMING_SNAKE_CASE = os.path.split(lowerCamelCase__ )[-1].split(""".""" )[0] accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ ) # Grab all the image filenames __SCREAMING_SNAKE_CASE = [os.path.join(args.data_dir , lowerCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )] # Build the label correspondences __SCREAMING_SNAKE_CASE = [extract_label(lowerCamelCase__ ) for fname in file_names] __SCREAMING_SNAKE_CASE = list(set(lowerCamelCase__ ) ) id_to_label.sort() __SCREAMING_SNAKE_CASE = {lbl: i for i, lbl in enumerate(lowerCamelCase__ )} # Set the seed before splitting the data. 
np.random.seed(lowerCamelCase__ ) torch.manual_seed(lowerCamelCase__ ) torch.cuda.manual_seed_all(lowerCamelCase__ ) # Split our filenames between train and validation __SCREAMING_SNAKE_CASE = np.random.permutation(len(lowerCamelCase__ ) ) __SCREAMING_SNAKE_CASE = int(0.8 * len(lowerCamelCase__ ) ) __SCREAMING_SNAKE_CASE = random_perm[:cut] __SCREAMING_SNAKE_CASE = random_perm[cut:] # For training we use a simple RandomResizedCrop __SCREAMING_SNAKE_CASE = Compose([RandomResizedCrop(lowerCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] ) __SCREAMING_SNAKE_CASE = PetsDataset( [file_names[i] for i in train_split] , image_transform=lowerCamelCase__ , label_to_id=lowerCamelCase__ ) # For evaluation, we use a deterministic Resize __SCREAMING_SNAKE_CASE = Compose([Resize(lowerCamelCase__ ), ToTensor()] ) __SCREAMING_SNAKE_CASE = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowerCamelCase__ , label_to_id=lowerCamelCase__ ) # Instantiate dataloaders. __SCREAMING_SNAKE_CASE = DataLoader(lowerCamelCase__ , shuffle=lowerCamelCase__ , batch_size=lowerCamelCase__ , num_workers=4 ) __SCREAMING_SNAKE_CASE = DataLoader(lowerCamelCase__ , shuffle=lowerCamelCase__ , batch_size=lowerCamelCase__ , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = create_model("""resnet50d""" , pretrained=lowerCamelCase__ , num_classes=len(lowerCamelCase__ ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): __SCREAMING_SNAKE_CASE = False for param in model.get_classifier().parameters(): __SCREAMING_SNAKE_CASE = True # We normalize the batches of images to be a bit faster. __SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device ) __SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler __SCREAMING_SNAKE_CASE = OneCycleLR(optimizer=lowerCamelCase__ , max_lr=lowerCamelCase__ , epochs=lowerCamelCase__ , steps_per_epoch=len(lowerCamelCase__ ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__SCREAMING_SNAKE_CASE = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # We need to keep track of how many total steps we have iterated over __SCREAMING_SNAKE_CASE = 0 # We also need to keep track of the starting epoch so files are named properly __SCREAMING_SNAKE_CASE = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}" ) accelerator.load_state(args.resume_from_checkpoint ) __SCREAMING_SNAKE_CASE = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint __SCREAMING_SNAKE_CASE = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) __SCREAMING_SNAKE_CASE = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` __SCREAMING_SNAKE_CASE = os.path.splitext(lowerCamelCase__ )[0] if "epoch" in training_difference: __SCREAMING_SNAKE_CASE = int(training_difference.replace("""epoch_""" , """""" ) ) + 1 __SCREAMING_SNAKE_CASE = None else: __SCREAMING_SNAKE_CASE = int(training_difference.replace("""step_""" , """""" ) ) __SCREAMING_SNAKE_CASE = resume_step // len(lowerCamelCase__ ) resume_step -= starting_epoch * len(lowerCamelCase__ ) # Now we train the model for epoch in range(lowerCamelCase__ , lowerCamelCase__ ): model.train() if args.with_tracking: __SCREAMING_SNAKE_CASE = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step __SCREAMING_SNAKE_CASE = accelerator.skip_first_batches(lowerCamelCase__ , lowerCamelCase__ ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader __SCREAMING_SNAKE_CASE = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. __SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()} __SCREAMING_SNAKE_CASE = (batch["image"] - mean) / std __SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) __SCREAMING_SNAKE_CASE = torch.nn.functional.cross_entropy(lowerCamelCase__ , batch["""label"""] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(lowerCamelCase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __SCREAMING_SNAKE_CASE = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: __SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , lowerCamelCase__ ) accelerator.save_state(lowerCamelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
__SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()} __SCREAMING_SNAKE_CASE = (batch["image"] - mean) / std with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ) __SCREAMING_SNAKE_CASE = outputs.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""label"""]) ) __SCREAMING_SNAKE_CASE = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() __SCREAMING_SNAKE_CASE = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}" ) if args.with_tracking: accelerator.log( { """accuracy""": 100 * eval_metric, """train_loss""": total_loss.item() / len(lowerCamelCase__ ), """epoch""": epoch, } , step=lowerCamelCase__ , ) if checkpointing_steps == "epoch": __SCREAMING_SNAKE_CASE = f"epoch_{epoch}" if args.output_dir is not None: __SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , lowerCamelCase__ ) accelerator.save_state(lowerCamelCase__ ) if args.with_tracking: accelerator.end_training() def _lowerCAmelCase ( ): __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument("""--data_dir""" , required=lowerCamelCase__ , help="""The data folder on disk.""" ) parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--checkpointing_steps""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , ) parser.add_argument( """--output_dir""" , type=lowerCamelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=lowerCamelCase__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
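# A minimal launch sketch, assuming a folder of `<label>_<id>.jpg` images and
# that this file is saved as `cv_example.py` (the file name is an assumption):
#
#   accelerate config   # answer the hardware questions once
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch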
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case__(_UpperCamelCase ): """simple docstring""" def snake_case ( self : Optional[Any] ): lowercase__ : str = tempfile.mkdtemp() lowercase__ : Optional[Any] = 8 # DPR tok lowercase__ : Dict = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowercase__ : List[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok lowercase__ : Optional[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : List[str] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowercase__ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : List[Any] = {"unk_token": "<unk>"} lowercase__ : Any = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) def snake_case ( self : Any ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def snake_case ( self : Any ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def snake_case ( self : Any ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def snake_case ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def snake_case ( self : Optional[int] ): lowercase__ : int = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset 
def snake_case ( self : List[str] ): lowercase__ : Union[str, Any] = self.get_dummy_dataset() lowercase__ : str = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Union[str, Any] = dataset lowercase__ : List[str] = RagRetriever( SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : bool ): lowercase__ : Union[str, Any] = self.get_dummy_dataset() lowercase__ : Optional[int] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowercase__ : Any = os.path.join(self.tmpdirname , "dataset" ) lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset lowercase__ : Tuple = RagRetriever( SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowercase__ : Dict = RagRetriever( SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE ) , ) return retriever def snake_case ( self : Tuple ): lowercase__ : Optional[int] = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) lowercase__ : Optional[int] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) lowercase__ : List[str] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(SCREAMING_SNAKE_CASE , open(SCREAMING_SNAKE_CASE , "wb" ) ) lowercase__ : int = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowercase__ : Any = RagRetriever( SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def snake_case ( self : int ): lowercase__ : Any = 1 lowercase__ : str = self.get_dummy_canonical_hf_index_retriever() lowercase__ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) 
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def snake_case ( self : str ): lowercase__ : Dict = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple = self.get_dummy_dataset() retriever.save_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def snake_case ( self : str ): lowercase__ : Union[str, Any] = 1 lowercase__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ , lowercase__ , lowercase__ : Optional[Any] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def snake_case ( self : Union[str, Any] ): lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : str = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def snake_case ( self : Union[str, Any] ): lowercase__ : Optional[Any] = 1 lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ , lowercase__ , lowercase__ : Dict = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], 
[0]] ) def snake_case ( self : List[str] ): lowercase__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : int = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Dict = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def snake_case ( self : Union[str, Any] ): lowercase__ : List[Any] = 1 lowercase__ : List[str] = self.get_dummy_legacy_index_retriever() lowercase__ : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ , lowercase__ , lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def snake_case ( self : Dict ): lowercase__ : Optional[int] = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def snake_case ( self : Any ): import torch lowercase__ : List[Any] = 1 lowercase__ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever() lowercase__ : Tuple = [[5, 7], [10, 11]] lowercase__ : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : int = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ , lowercase__ : List[str] = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray ) lowercase__ : List[str] = retriever( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE , return_tensors="pt" , ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) 
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def snake_case ( self : int ): lowercase__ : List[Any] = self.get_dpr_ctx_encoder_tokenizer() lowercase__ : Optional[int] = 1 lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE ) retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = [[5, 7], [10, 11]] lowercase__ : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[Any] = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE ) self.assertEqual( len(SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() __SCREAMING_SNAKE_CASE : Optional[Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
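# A minimal invocation sketch; the checkpoint and config file names below are
# placeholders, not values taken from this script:
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./converted-pipeline \
#       --extract_ema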
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json', 'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class __A (snake_case__): '''simple docstring''' __lowercase: List[Any] = """mobilenet_v1""" def __init__( self : Union[str, Any] , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : List[Any]=224 , UpperCAmelCase_ : List[Any]=1.0 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : int="relu6" , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]=0.999 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[Any]=0.001 , **UpperCAmelCase_ : Any , ) ->Union[str, Any]: """simple docstring""" super().__init__(**UpperCAmelCase_ ) if depth_multiplier <= 0: raise ValueError("""depth_multiplier must be greater than zero.""" ) snake_case_ = num_channels snake_case_ = image_size snake_case_ = depth_multiplier snake_case_ = min_depth snake_case_ = hidden_act snake_case_ = tf_padding snake_case_ = classifier_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps class __A (snake_case__): '''simple docstring''' __lowercase: int = version.parse("""1.11""") @property def lowerCAmelCase ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict([("""pixel_values""", {0: """batch"""})] ) @property def lowerCAmelCase ( self : int ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "image-classification": return OrderedDict([("""logits""", {0: """batch"""})] ) else: return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] ) @property def lowerCAmelCase ( self : int ) ->float: """simple docstring""" return 1E-4
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Use the smallest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
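# A minimal invocation sketch, assuming the script is saved under the usual
# name used in the distillation project (`scripts/binarized_data.py`):
#
#   python scripts/binarized_data.py --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text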
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
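# A quick self-contained sanity-check sketch (the synthetic array below is
# illustrative; no image file is needed). Note the kernel above is not
# normalized to sum to 1, so a constant input maps to a constant output
# scaled by the kernel sum:
#
#   from numpy import full
#   flat = full((10, 10), 100, dtype=uint8)
#   smoothed = gaussian_filter(flat, 3, sigma=1)
#   print(smoothed.shape)  # (8, 8): each axis shrinks by k_size - 1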
"""simple docstring""" from math import isqrt def _a ( _snake_case ): """simple docstring""" return all(number % divisor != 0 for divisor in range(2 , isqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) ) def _a ( _snake_case = 10**6 ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 7 while prime_candidate < max_prime: primes_count += is_prime(SCREAMING_SNAKE_CASE_ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def _a ( _snake_case ): """simple docstring""" if isinstance(_snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_snake_case , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_snake_case ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''pixel_values'''] def __init__( self ,A = True ,A = None ,A = PILImageResampling.BILINEAR ,A = True ,A = None ,A = True ,A = 1 / 255 ,A = True ,A = True ,A = None ,A = None ,**A ,): super().__init__(**A ) UpperCAmelCase = size if size is not None else {"""shortest_edge""": 256} UpperCAmelCase = get_size_dict(A ,default_to_square=A ) UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase = get_size_dict(A ,param_name="""crop_size""" ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = offset UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCamelCase ( self ,A ,A ,A = PILImageResampling.BILINEAR ,A = None ,**A ,): UpperCAmelCase = get_size_dict(A ,default_to_square=A ) if "shortest_edge" in size: UpperCAmelCase = get_resize_output_image_size(A ,size["""shortest_edge"""] ,default_to_square=A ) elif "height" in size and "width" in size: UpperCAmelCase = (size["""height"""], size["""width"""]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(A ,size=A ,resample=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A = None ,**A ,): UpperCAmelCase = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(A ,size=(size["""height"""], size["""width"""]) ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A = True ,A = None ,**A ,): UpperCAmelCase = image.astype(np.floataa ) if offset: UpperCAmelCase = image - (scale / 2) return rescale(A ,scale=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A ,A = None ,**A ,): return normalize(A ,mean=A ,std=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,): if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. UpperCAmelCase = to_numpy_array(A ) if do_resize: UpperCAmelCase = self.resize(image=A ,size=A ,resample=A ) if do_center_crop: UpperCAmelCase = self.center_crop(A ,size=A ) if do_rescale: UpperCAmelCase = self.rescale(image=A ,scale=A ,offset=A ) if do_normalize: UpperCAmelCase = self.normalize(image=A ,mean=A ,std=A ) UpperCAmelCase = to_channel_dimension_format(A ,A ) return image def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,**A ,): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = offset if offset is not None else self.offset UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(A ,default_to_square=A ) UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(A ,param_name="""crop_size""" ) if not valid_images(A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) UpperCAmelCase = make_batched(A ) UpperCAmelCase = [ [ self._preprocess_image( image=A ,do_resize=A ,size=A ,resample=A ,do_center_crop=A ,crop_size=A ,do_rescale=A ,rescale_factor=A ,offset=A ,do_normalize=A ,image_mean=A ,image_std=A ,data_format=A ,) for img in video ] for video in videos ] UpperCAmelCase = {"""pixel_values""": videos} return BatchFeature(data=A ,tensor_type=A )
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : int = GPTaTokenizer UpperCAmelCase__ : str = GPTaTokenizerFast UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : List[Any] = {"add_prefix_space": True} UpperCAmelCase__ : int = False def __lowercase ( self ) -> int: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _a : str = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] _a : List[Any] = dict(zip(_a , range(len(_a ) ) ) ) _a : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a : List[str] = {'''unk_token''': '''<unk>'''} _a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_a ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_a ) ) def __lowercase ( self , **_a ) -> List[str]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , **_a ) -> List[Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , _a ) -> Optional[Any]: _a : Tuple = '''lower newer''' _a : Tuple = '''lower newer''' return input_text, output_text def __lowercase ( self ) -> Any: _a : Any = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _a : int = '''lower newer''' _a : int = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _a : List[str] = tokenizer.tokenize(_a , add_prefix_space=_a ) self.assertListEqual(_a , _a ) _a : Optional[int] = tokens + [tokenizer.unk_token] _a : List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def __lowercase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return _a : int = self.get_tokenizer() _a : Tuple = self.get_rust_tokenizer(add_prefix_space=_a ) _a : Tuple = '''lower newer''' # Testing tokenization _a : List[str] = tokenizer.tokenize(_a , add_prefix_space=_a ) _a : Optional[Any] = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) # Testing conversion to ids without special tokens _a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) _a : List[Any] = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) # Testing conversion to ids with special tokens _a : List[str] = self.get_rust_tokenizer(add_prefix_space=_a ) _a : List[str] = tokenizer.encode(_a , add_prefix_space=_a ) _a : Tuple = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # Testing the unknown token _a : Optional[Any] = tokens + [rust_tokenizer.unk_token] _a : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] 
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a ) def __lowercase ( self , *_a , **_a ) -> int: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __lowercase ( self , _a=1_5 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : Optional[int] = self.rust_tokenizer_class.from_pretrained(_a , **_a ) # Simple input _a : List[str] = '''This is a simple input''' _a : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] _a : Tuple = ('''This is a simple input''', '''This is a pair''') _a : Optional[Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' ) # Simple input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' ) # Simple input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , ) # Pair input self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' ) # Pair input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' ) # Pair input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , ) def __lowercase ( self ) -> List[Any]: _a : int = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input _a : int = '''This is a simple input''' _a : int = ['''This is a simple input looooooooong''', '''This is a simple input'''] _a : Any = ('''This is a simple input''', '''This is a pair''') _a : Dict = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] _a : Optional[int] = tokenizer.pad_token_id _a : List[Any] = tokenizer(_a , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' ) _a : Optional[int] = tokenizer(_a , padding=_a , truncate=_a , return_tensors='''np''' ) _a : int = tokenizer(*_a , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' ) _a : Union[str, Any] = tokenizer(_a , padding=_a , truncate=_a , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in 
out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def __lowercase ( self ) -> Any: _a : Union[str, Any] = '''$$$''' _a : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_a , add_bos_token=_a ) _a : Any = '''This is a simple input''' _a : str = ['''This is a simple input 1''', '''This is a simple input 2'''] _a : Tuple = tokenizer.bos_token_id _a : Optional[Any] = tokenizer(_a ) _a : str = tokenizer(_a ) self.assertEqual(out_s.input_ids[0] , _a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _a : str = tokenizer.decode(out_s.input_ids ) _a : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __lowercase ( self ) -> str: pass def __lowercase ( self ) -> Dict: # TODO: change to self.get_tokenizers() when the fast version is implemented _a : Optional[int] = [self.get_tokenizer(do_lower_case=_a , add_bos_token=_a )] for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _a : Tuple = '''Encode this.''' _a : Optional[Any] = '''This one too please.''' _a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a ) encoded_sequence += tokenizer.encode(_a , add_special_tokens=_a ) _a : List[str] = tokenizer.encode_plus( _a , _a , add_special_tokens=_a , return_special_tokens_mask=_a , ) _a : int = encoded_sequence_dict['''input_ids'''] _a : int = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(_a ) , len(_a ) ) _a : List[Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(_a ) ] _a : str = [x for x in filtered_sequence if x is not None] self.assertEqual(_a , _a ) @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> List[Any]: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _a : Any = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_a ) _a : int = '''A photo of a cat''' _a : List[Any] = tokenizer.encode( _a , ) self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('''test_opt''' ) _a : Union[str, Any] = AutoTokenizer.from_pretrained('''./test_opt''' ) _a : Any = tokenizer.encode( _a , ) self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def __lowercase ( self ) -> int: _a : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=_a ) _a : Any = '''A photo of a cat''' _a : Optional[int] = tokenizer.encode( _a , ) # Same as above self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def __lowercase ( self ) -> Any: _a : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_a ) _a : Optional[Any] = '''bos''' _a : Optional[Any] = tokenizer.get_vocab()['''bos'''] _a : str = '''A photo of a cat''' _a : int = tokenizer.encode( _a , ) # We changed the bos token self.assertEqual(_a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('''./tok''' ) _a : Dict = AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) _a : Union[str, Any] = 
tokenizer.encode( _a , ) self.assertEqual(_a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging lowerCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: lowercase__ : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ ) lowercase__ : Optional[int] = XLMProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase__ , output_loading_info=lowerCamelCase__ ) else: lowercase__ : Optional[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ ) lowercase__ : Any = ProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase__ , output_loading_info=lowerCamelCase__ ) lowercase__ : Any = ["key_proj", "value_proj", "query_proj"] lowercase__ : Optional[Any] = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: lowercase__ : Optional[int] = key.split("." ) if attributes[0] == "lm_head": lowercase__ : List[Any] = prophet lowercase__ : int = prophet_old else: lowercase__ : str = prophet.prophetnet lowercase__ : Optional[Any] = prophet_old.model lowercase__ : List[str] = False for attribute in attributes: if attribute in mapping: lowercase__ : Union[str, Any] = mapping[attribute] if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0: lowercase__ : Any = attribute elif hasattr(lowerCamelCase__ , lowerCamelCase__ ): lowercase__ : Optional[int] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowercase__ : List[Any] = old_model.weight logger.info(F"""{attribute} is initialized.""" ) lowercase__ : str = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
lowercase__ : Dict = old_model.bias logger.info(F"""{attribute} is initialized""" ) lowercase__ : Dict = True break elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ): lowercase__ : Union[str, Any] = old_model.in_proj_weight.shape[0] // 3 lowercase__ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowercase__ : List[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowercase__ : List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowercase__ : List[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowercase__ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowercase__ : List[Any] = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." lowercase__ : int = nn.Parameter(old_model.embed_positions.weight[:512, :] ) lowercase__ : List[Any] = True break if attribute.isdigit(): lowercase__ : Any = model[int(lowerCamelCase__ )] lowercase__ : List[str] = old_model[int(lowerCamelCase__ )] else: lowercase__ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if old_attribute == "": lowercase__ : Union[str, Any] = old_model else: if not hasattr(lowerCamelCase__ , lowerCamelCase__ ): raise ValueError(F"""{old_model} does not have {old_attribute}""" ) lowercase__ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ ) if not is_key_init: raise ValueError(F"""{key} was not correctly initialized!""" ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) prophet.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
359
from math import ceil, sqrt def __lowerCamelCase ( lowerCamelCase__ = 1_000_000 ): """simple docstring""" lowercase__ : int = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: lowercase__ : List[str] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: lowercase__ : List[str] = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'''{solution() = }''')
121
0
'''simple docstring''' import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class a_ ( unittest.TestCase ): def __init__( self : Dict , lowercase : Union[str, Any] , lowercase : Any=100 , lowercase : List[str]=13 , lowercase : int=30 , lowercase : Dict=2 , lowercase : List[str]=3 , lowercase : List[Any]=True , lowercase : Union[str, Any]=True , lowercase : Union[str, Any]=32 , lowercase : Tuple=5 , lowercase : List[Any]=4 , lowercase : Union[str, Any]=37 , lowercase : Optional[int]="gelu" , lowercase : Any=0.1 , lowercase : Optional[Any]=0.1 , lowercase : Optional[Any]=10 , lowercase : Union[str, Any]=0.02 , lowercase : Union[str, Any]=3 , ): """simple docstring""" lowercase_ :int = parent lowercase_ :List[Any] = vocab_size lowercase_ :Dict = batch_size lowercase_ :str = image_size lowercase_ :List[Any] = patch_size lowercase_ :List[str] = num_channels lowercase_ :Tuple = is_training lowercase_ :Dict = use_labels lowercase_ :Any = hidden_size lowercase_ :str = num_hidden_layers lowercase_ :List[Any] = num_attention_heads lowercase_ :Union[str, Any] = intermediate_size lowercase_ :List[str] = hidden_act lowercase_ :Optional[Any] = hidden_dropout_prob lowercase_ :Optional[Any] = attention_probs_dropout_prob lowercase_ :Union[str, Any] = type_sequence_label_size lowercase_ :Union[str, Any] = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ :List[Any] = (image_size // patch_size) ** 2 lowercase_ :int = num_patches + 1 def lowercase__ ( self : str ): """simple docstring""" lowercase_ :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ :Tuple = None if self.use_labels: lowercase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ :Optional[int] = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def lowercase__ ( self : Union[str, Any] , lowercase : str , lowercase : Optional[Any] , lowercase : Optional[Any] ): """simple docstring""" lowercase_ :List[str] = FlaxBeitModel(config=lowercase ) lowercase_ :Tuple = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Dict ): """simple docstring""" lowercase_ :int = FlaxBeitForMaskedImageModeling(config=lowercase ) lowercase_ :str = model(lowercase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def lowercase__ ( self : List[str] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Dict ): """simple docstring""" lowercase_ :str = self.type_sequence_label_size lowercase_ :Optional[Any] = FlaxBeitForImageClassification(config=lowercase ) lowercase_ :List[str] = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase_ :Optional[int] = 1 lowercase_ :List[str] = FlaxBeitForImageClassification(lowercase ) lowercase_ :Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ :int = model(lowercase ) def lowercase__ ( self : Union[str, Any] ): """simple docstring""" lowercase_ :int = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) :List[str] = config_and_inputs lowercase_ :Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a_ ( _lowerCAmelCase , unittest.TestCase ): __A = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def lowercase__ ( self : Optional[int] ): """simple docstring""" lowercase_ :Dict = FlaxBeitModelTester(self ) lowercase_ :int = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 ) def lowercase__ ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : Tuple ): """simple docstring""" lowercase_ , lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ :Optional[Any] = model_class(lowercase ) lowercase_ :List[Any] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ :str = [*signature.parameters.keys()] lowercase_ :List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase ) def lowercase__ ( self : List[str] ): """simple docstring""" lowercase_ , lowercase_ :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase_ :Dict = self._prepare_for_class(lowercase , lowercase ) lowercase_ :Optional[int] = model_class(lowercase ) @jax.jit def model_jitted(lowercase : Dict , **lowercase : Any ): return model(pixel_values=lowercase , **lowercase ) with self.subTest("JIT Enabled" ): lowercase_ :int = model_jitted(**lowercase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase_ :Any = model_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase__ ( self : Dict ): """simple docstring""" lowercase_ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def lowercase__ ( self : List[str] ): """simple docstring""" lowercase_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase ) def lowercase__ ( self : List[Any] ): """simple docstring""" lowercase_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def lowercase__ ( self : int ): """simple docstring""" for 
model_class_name in self.all_model_classes: lowercase_ :str = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" ) lowercase_ :int = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowercase ) def UpperCAmelCase_ ( ): lowercase_ :List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @require_flax class a_ ( unittest.TestCase ): @cached_property def lowercase__ ( self : Optional[Any] ): """simple docstring""" return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def lowercase__ ( self : str ): """simple docstring""" lowercase_ :List[str] = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ) lowercase_ :str = self.default_image_processor lowercase_ :int = prepare_img() lowercase_ :Any = image_processor(images=lowercase , return_tensors="np" ).pixel_values # prepare bool_masked_pos lowercase_ :List[str] = np.ones((1, 196) , dtype=lowercase ) # forward pass lowercase_ :List[str] = model(pixel_values=lowercase , bool_masked_pos=lowercase ) lowercase_ :Dict = outputs.logits # verify the logits lowercase_ :Tuple = (1, 196, 8_192) self.assertEqual(logits.shape , lowercase ) lowercase_ :Dict = np.array( [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , lowercase , atol=1e-2 ) ) @slow def lowercase__ ( self : Tuple ): """simple docstring""" lowercase_ :int = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ) lowercase_ :Tuple = self.default_image_processor lowercase_ :Dict = prepare_img() lowercase_ :str = image_processor(images=lowercase , return_tensors="np" ) # forward pass lowercase_ :Any = model(**lowercase ) lowercase_ :Tuple = outputs.logits # verify the logits lowercase_ :Dict = (1, 1_000) self.assertEqual(logits.shape , lowercase ) lowercase_ :List[str] = np.array([-1.23_85, -1.09_87, -1.01_08] ) self.assertTrue(np.allclose(logits[0, :3] , lowercase , atol=1e-4 ) ) lowercase_ :Dict = 281 self.assertEqual(logits.argmax(-1 ).item() , lowercase ) @slow def lowercase__ ( self : List[Any] ): """simple docstring""" lowercase_ :List[str] = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ) lowercase_ :List[Any] = self.default_image_processor lowercase_ :Union[str, Any] = prepare_img() lowercase_ :Tuple = image_processor(images=lowercase , return_tensors="np" ) # forward pass lowercase_ :List[Any] = model(**lowercase ) lowercase_ :Dict = outputs.logits # verify the logits lowercase_ :List[str] = (1, 21_841) self.assertEqual(logits.shape , lowercase ) lowercase_ :Union[str, Any] = np.array([1.68_81, -0.27_87, 0.59_01] ) self.assertTrue(np.allclose(logits[0, :3] , lowercase , atol=1e-4 ) ) lowercase_ :List[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowercase )
223
'''simple docstring''' import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class a_ ( unittest.TestCase ): def __init__( self : List[str] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : int=7 , lowercase : List[str]=True , lowercase : int=True , lowercase : str=True , lowercase : Any=True , lowercase : List[str]=99 , lowercase : Union[str, Any]=32 , lowercase : Optional[Any]=5 , lowercase : Dict=4 , lowercase : Dict=37 , lowercase : Dict="gelu" , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : List[Any]=512 , lowercase : str=16 , lowercase : Dict=2 , lowercase : Any=0.02 , lowercase : Any=4 , ): """simple docstring""" lowercase_ :List[str] = parent lowercase_ :Any = batch_size lowercase_ :Dict = seq_length lowercase_ :Union[str, Any] = is_training lowercase_ :Optional[int] = use_attention_mask lowercase_ :Any = use_token_type_ids lowercase_ :Union[str, Any] = use_labels lowercase_ :Dict = vocab_size lowercase_ :Tuple = hidden_size lowercase_ :Tuple = num_hidden_layers lowercase_ :Optional[int] = num_attention_heads lowercase_ :Optional[Any] = intermediate_size lowercase_ :str = hidden_act lowercase_ :Tuple = hidden_dropout_prob lowercase_ :Optional[Any] = attention_probs_dropout_prob lowercase_ :Tuple = max_position_embeddings lowercase_ :Any = type_vocab_size lowercase_ :int = type_sequence_label_size lowercase_ :Tuple = initializer_range lowercase_ :Optional[Any] = num_choices def lowercase__ ( self : List[Any] ): """simple docstring""" lowercase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ :Union[str, Any] = None if self.use_attention_mask: lowercase_ :Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ :List[str] = None if self.use_token_type_ids: lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ :Optional[Any] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase__ ( self : Union[str, Any] ): """simple docstring""" lowercase_ :int = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ :Tuple = config_and_inputs lowercase_ :Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def lowercase__ ( self : List[Any] ): """simple docstring""" lowercase_ :Any = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ :Union[str, Any] = config_and_inputs lowercase_ :Dict = True lowercase_ 
:Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class a_ ( _lowerCAmelCase , unittest.TestCase ): __A = True __A = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowercase__ ( self : Any ): """simple docstring""" lowercase_ :Optional[Any] = FlaxBertModelTester(self ) @slow def lowercase__ ( self : List[str] ): """simple docstring""" lowercase_ :List[str] = FlaxBertModel.from_pretrained("bert-base-cased" ) lowercase_ :str = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase )
223
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : Optional[Any] = { """configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : str = [ """LILT_PRETRAINED_MODEL_ARCHIVE_LIST""", """LiltForQuestionAnswering""", """LiltForSequenceClassification""", """LiltForTokenClassification""", """LiltModel""", """LiltPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys A_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
349
'''simple docstring''' import math def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int: '''simple docstring''' _UpperCAmelCase : str = len(lowerCAmelCase_ ) _UpperCAmelCase : List[str] = int(math.floor(math.sqrt(lowerCAmelCase_ ) ) ) _UpperCAmelCase : int = 0 while arr[min(lowerCAmelCase_ , lowerCAmelCase_ ) - 1] < x: _UpperCAmelCase : Optional[int] = step step += int(math.floor(math.sqrt(lowerCAmelCase_ ) ) ) if prev >= n: return -1 while arr[prev] < x: _UpperCAmelCase : List[Any] = prev + 1 if prev == min(lowerCAmelCase_ , lowerCAmelCase_ ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": A_ : str = input("""Enter numbers separated by a comma:\n""").strip() A_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")] A_ : int = int(input("""Enter the number to be searched:\n""")) A_ : Any = jump_search(arr, x) if res == -1: print("""Number not found!""") else: print(f"""Number {x} is at index {res}""")
349
1
'''simple docstring''' import re import string import numpy as np import datasets __a = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" __a = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" __a = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def lowerCamelCase ( self : Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), 
"""references""": datasets.Value("""string""" , id="""sequence""" ), } ) , reference_urls=[] , ) def lowerCamelCase ( self : Tuple , snake_case_ : int , snake_case_ : Tuple , snake_case_ : List[Any]=None , snake_case_ : Any=False , snake_case_ : Optional[int]=False , snake_case_ : str=False , ): if regexes_to_ignore is not None: for s in regexes_to_ignore: snake_case__ : int = np.array([re.sub(snake_case_ , """""" , snake_case_ ) for x in predictions] ) snake_case__ : Any = np.array([re.sub(snake_case_ , """""" , snake_case_ ) for x in references] ) else: snake_case__ : int = np.asarray(snake_case_ ) snake_case__ : Optional[int] = np.asarray(snake_case_ ) if ignore_case: snake_case__ : Dict = np.char.lower(snake_case_ ) snake_case__ : Tuple = np.char.lower(snake_case_ ) if ignore_punctuation: snake_case__ : str = string.punctuation.maketrans("""""" , """""" , string.punctuation ) snake_case__ : Tuple = np.char.translate(snake_case_ , table=snake_case_ ) snake_case__ : Dict = np.char.translate(snake_case_ , table=snake_case_ ) if ignore_numbers: snake_case__ : Any = string.digits.maketrans("""""" , """""" , string.digits ) snake_case__ : Union[str, Any] = np.char.translate(snake_case_ , table=snake_case_ ) snake_case__ : Union[str, Any] = np.char.translate(snake_case_ , table=snake_case_ ) snake_case__ : Tuple = predictions == references return {"exact_match": np.mean(snake_case_ ) * 100}
35
import json import os import torch from diffusers import UNetaDModel os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True) os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True) def UpperCamelCase ( __lowercase : int ): '''simple docstring''' if hor == 1_28: A_ : List[Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') A_ : Tuple = (32, 1_28, 2_56) A_ : Optional[int] = ('UpResnetBlock1D', 'UpResnetBlock1D') elif hor == 32: A_ : Union[str, Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') A_ : Any = (32, 64, 1_28, 2_56) A_ : int = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D') A_ : List[str] = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) A_ : List[Any] = model.state_dict() A_ : List[str] = { 'down_block_types': down_block_types, 'block_out_channels': block_out_channels, 'up_block_types': up_block_types, 'layers_per_block': 1, 'use_timestep_embedding': True, 'out_block_type': 'OutConv1DBlock', 'norm_num_groups': 8, 'downsample_each_block': False, 'in_channels': 14, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'flip_sin_to_cos': False, 'freq_shift': 1, 'sample_size': 6_55_36, 'mid_block_type': 'MidResTemporalBlock1D', 'act_fn': 'mish', } A_ : Union[str, Any] = UNetaDModel(**__lowercase ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) A_ : Optional[Any] = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): A_ : Optional[int] = state_dict.pop(__lowercase ) hf_value_function.load_state_dict(__lowercase ) torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,'w' ) as f: json.dump(__lowercase ,__lowercase ) def UpperCamelCase ( ): '''simple docstring''' A_ : Any = { 'in_channels': 14, 'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'), 'up_block_types': (), 'out_block_type': 'ValueFunction', 'mid_block_type': 'ValueFunctionMidBlock1D', 'block_out_channels': (32, 64, 1_28, 2_56), 'layers_per_block': 1, 'downsample_each_block': True, 'sample_size': 6_55_36, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'use_timestep_embedding': True, 'flip_sin_to_cos': False, 'freq_shift': 1, 'norm_num_groups': 8, 'act_fn': 'mish', } A_ : Union[str, Any] = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' ) A_ : List[Any] = model A_ : Union[str, Any] = UNetaDModel(**__lowercase ) print(f'''length of state dict: {len(state_dict.keys() )}''' ) print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) A_ : Optional[int] = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): A_ : List[str] = state_dict.pop(__lowercase ) hf_value_function.load_state_dict(__lowercase ) torch.save(hf_value_function.state_dict() ,'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' ) with open('hub/hopper-medium-v2/value_function/config.json' ,'w' ) as f: json.dump(__lowercase ,__lowercase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
140
0
import random class lowerCamelCase__ : '''simple docstring''' @staticmethod def _lowerCamelCase ( a :str ) -> tuple[list[int], list[int]]: __UpperCamelCase : List[Any] = [ord(a ) for i in text] __UpperCamelCase : Optional[int] = [] __UpperCamelCase : Dict = [] for i in plain: __UpperCamelCase : Dict = random.randint(1 , 3_0_0 ) __UpperCamelCase : Union[str, Any] = (i + k) * k cipher.append(a ) key.append(a ) return cipher, key @staticmethod def _lowerCamelCase ( a :list[int] , a :list[int] ) -> str: __UpperCamelCase : Any = [] for i in range(len(a ) ): __UpperCamelCase : int = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(a ) ) return "".join(a ) if __name__ == "__main__": lowercase : List[str] = Onepad().encrypt('Hello') print(c, k) print(Onepad().decrypt(c, k))
360
def _SCREAMING_SNAKE_CASE ( a : List[Any] , b : str) -> Any: '''simple docstring''' res : Dict = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def _lowerCamelCase ( a : Optional[Any] , b : Optional[int] , c : Tuple) -> Any: '''simple docstring''' res : Union[str, Any] = 0 while b > 0: if b & 1: res : str = ((res % c) + (a % c)) % c a += a b >>= 1 return res
151
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _lowerCamelCase =None _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _lowerCamelCase ={ "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _lowerCamelCase ={ "camembert-base": 5_12, } _lowerCamelCase ="▁" class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = ['input_ids', 'attention_mask'] __UpperCAmelCase = CamembertTokenizer def __init__( self : List[str] ,snake_case : str=None ,snake_case : Dict=None ,snake_case : int="<s>" ,snake_case : Optional[int]="</s>" ,snake_case : Optional[Any]="</s>" ,snake_case : Union[str, Any]="<s>" ,snake_case : Optional[int]="<unk>" ,snake_case : Optional[Any]="<pad>" ,snake_case : str="<mask>" ,snake_case : Tuple=["<s>NOTUSED", "</s>NOTUSED"] ,**snake_case : Any ,): # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE =AddedToken(snake_case ,lstrip=snake_case ,rstrip=snake_case ) if isinstance(snake_case ,snake_case ) else mask_token super().__init__( snake_case ,tokenizer_file=snake_case ,bos_token=snake_case ,eos_token=snake_case ,sep_token=snake_case ,cls_token=snake_case ,unk_token=snake_case ,pad_token=snake_case ,mask_token=snake_case ,additional_special_tokens=snake_case ,**snake_case ,) SCREAMING_SNAKE_CASE =vocab_file SCREAMING_SNAKE_CASE =False if not self.vocab_file else True def _lowerCAmelCase ( self : Dict ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE =[self.cls_token_id] SCREAMING_SNAKE_CASE =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self : int ,snake_case : List[int] ,snake_case : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE =[self.sep_token_id] SCREAMING_SNAKE_CASE =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCAmelCase ( self : str ,snake_case : str ,snake_case : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(snake_case ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return SCREAMING_SNAKE_CASE =os.path.join( snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ): copyfile(self.vocab_file ,snake_case ) return (out_vocab_file,)
334
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json", } class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip_2_vision_model' def __init__( self : List[Any] ,snake_case : List[Any]=1408 ,snake_case : Optional[Any]=6144 ,snake_case : Optional[int]=39 ,snake_case : Optional[int]=16 ,snake_case : Optional[Any]=224 ,snake_case : Tuple=14 ,snake_case : Optional[Any]="gelu" ,snake_case : Union[str, Any]=0.00_001 ,snake_case : Dict=0.0 ,snake_case : Union[str, Any]=1e-10 ,snake_case : int=True ,**snake_case : str ,): super().__init__(**snake_case ) SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =patch_size SCREAMING_SNAKE_CASE =image_size SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =attention_dropout SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =qkv_bias @classmethod def _lowerCAmelCase ( cls : Dict ,snake_case : Union[str, os.PathLike] ,**snake_case : str ): cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": SCREAMING_SNAKE_CASE =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(snake_case ,**snake_case ) class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip_2_qformer' def __init__( self : Any ,snake_case : Dict=30522 ,snake_case : int=768 ,snake_case : List[Any]=12 ,snake_case : List[str]=12 ,snake_case : Optional[Any]=3072 ,snake_case : str="gelu" ,snake_case : Optional[Any]=0.1 ,snake_case : Union[str, Any]=0.1 ,snake_case : Optional[Any]=512 ,snake_case : List[Any]=0.02 ,snake_case : List[str]=1e-12 ,snake_case : Tuple=0 ,snake_case : Union[str, Any]="absolute" ,snake_case : List[Any]=2 ,snake_case : List[str]=1408 ,**snake_case : Optional[Any] ,): super().__init__(pad_token_id=snake_case ,**snake_case ) SCREAMING_SNAKE_CASE =vocab_size SCREAMING_SNAKE_CASE =hidden_size SCREAMING_SNAKE_CASE =num_hidden_layers SCREAMING_SNAKE_CASE =num_attention_heads SCREAMING_SNAKE_CASE =hidden_act SCREAMING_SNAKE_CASE =intermediate_size SCREAMING_SNAKE_CASE =hidden_dropout_prob SCREAMING_SNAKE_CASE =attention_probs_dropout_prob SCREAMING_SNAKE_CASE =max_position_embeddings SCREAMING_SNAKE_CASE =initializer_range SCREAMING_SNAKE_CASE =layer_norm_eps SCREAMING_SNAKE_CASE =position_embedding_type SCREAMING_SNAKE_CASE =cross_attention_frequency SCREAMING_SNAKE_CASE =encoder_hidden_size @classmethod def _lowerCAmelCase ( cls : List[Any] ,snake_case : Union[str, os.PathLike] ,**snake_case : Dict ): cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =cls.get_config_dict(snake_case ,**snake_case ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": SCREAMING_SNAKE_CASE =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(snake_case ,**snake_case ) class a_ ( lowerCamelCase_ ): """simple docstring""" __UpperCAmelCase = 'blip-2' __UpperCAmelCase = True def __init__( self : int ,snake_case : Dict=None ,snake_case : Tuple=None ,snake_case : str=None ,snake_case : Union[str, Any]=32 ,**snake_case : int ): super().__init__(**snake_case ) if vision_config is None: SCREAMING_SNAKE_CASE ={} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' ) if qformer_config is None: SCREAMING_SNAKE_CASE ={} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' ) if text_config is None: SCREAMING_SNAKE_CASE ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) SCREAMING_SNAKE_CASE =BlipaVisionConfig(**snake_case ) SCREAMING_SNAKE_CASE =BlipaQFormerConfig(**snake_case ) SCREAMING_SNAKE_CASE =text_config['model_type'] if 'model_type' in text_config else 'opt' SCREAMING_SNAKE_CASE =CONFIG_MAPPING[text_model_type](**snake_case ) SCREAMING_SNAKE_CASE =self.text_config.tie_word_embeddings SCREAMING_SNAKE_CASE =self.text_config.is_encoder_decoder SCREAMING_SNAKE_CASE =num_query_tokens SCREAMING_SNAKE_CASE =self.vision_config.hidden_size SCREAMING_SNAKE_CASE =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES SCREAMING_SNAKE_CASE =1.0 SCREAMING_SNAKE_CASE =0.02 @classmethod def _lowerCAmelCase ( cls : Union[str, Any] ,snake_case : BlipaVisionConfig ,snake_case : BlipaQFormerConfig ,snake_case : PretrainedConfig ,**snake_case : Any ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**snake_case ,) def _lowerCAmelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE =self.vision_config.to_dict() SCREAMING_SNAKE_CASE =self.qformer_config.to_dict() SCREAMING_SNAKE_CASE =self.text_config.to_dict() SCREAMING_SNAKE_CASE =self.__class__.model_type return output
334
1
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __snake_case : str = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, 'r', encoding='utf-8') as f: __snake_case : Optional[Any] = json.load(f) @require_torch class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __A ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: return FSMTTokenizer.from_pretrained(_lowerCamelCase ) def __A ( self , _SCREAMING_SNAKE_CASE ) -> Tuple: A_ = FSMTForConditionalGeneration.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality A_ = F'''facebook/wmt19-{pair}''' A_ = self.get_tokenizer(_lowerCamelCase ) A_ = self.get_model(_lowerCamelCase ) A_ = bleu_data[pair]['''src'''] A_ = bleu_data[pair]['''tgt'''] A_ = tokenizer(_lowerCamelCase , return_tensors='''pt''' , truncation=_lowerCamelCase , padding='''longest''' ).to(_lowerCamelCase ) A_ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) A_ = tokenizer.batch_decode( _lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase ) A_ = calculate_bleu(_lowerCamelCase , _lowerCamelCase ) print(_lowerCamelCase ) self.assertGreaterEqual(scores['''bleu'''] , _lowerCamelCase )
368
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets __snake_case : Any = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n' __snake_case : Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' __snake_case : Optional[int] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. 
`"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def __A ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]: if rouge_types is None: A_ = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] A_ = rouge_scorer.RougeScorer(rouge_types=_SCREAMING_SNAKE_CASE , use_stemmer=_SCREAMING_SNAKE_CASE ) if use_aggregator: A_ = scoring.BootstrapAggregator() else: A_ = [] for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): A_ = scorer.score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if use_aggregator: aggregator.add_scores(_SCREAMING_SNAKE_CASE ) else: scores.append(_SCREAMING_SNAKE_CASE ) if use_aggregator: A_ = aggregator.aggregate() else: A_ = {} for key in scores[0]: A_ = [score[key] for score in scores] return result
18
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase lowercase = logging.get_logger(__name__) lowercase = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class UpperCamelCase_ ( snake_case_ ): '''simple docstring''' lowerCAmelCase = '''longformer''' def __init__( self , a = 5_12 , a = 2 , a = 1 , a = 0 , a = 2 , a = 3_05_22 , a = 7_68 , a = 12 , a = 12 , a = 30_72 , a = "gelu" , a = 0.1 , a = 0.1 , a = 5_12 , a = 2 , a = 0.02 , a = 1E-12 , a = False , **a , ) -> Dict: super().__init__(pad_token_id=a , **a ) snake_case_ = attention_window snake_case_ = sep_token_id snake_case_ = bos_token_id snake_case_ = eos_token_id snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = onnx_export class UpperCamelCase_ ( snake_case_ ): '''simple docstring''' def __init__( self , a , a = "default" , a = None ) -> int: super().__init__(a , a , a ) snake_case_ = True @property def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case_ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case_ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: snake_case_ = super().outputs if self.task == "default": snake_case_ = {0: 'batch'} return outputs @property def _UpperCamelCase ( self ) -> float: return 1E-4 @property def _UpperCamelCase ( self ) -> int: # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def _UpperCamelCase ( self , a , a = -1 , a = -1 , a = False , a = None , ) -> Mapping[str, Any]: snake_case_ = super().generate_dummy_inputs( preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly snake_case_ = torch.zeros_like(inputs['input_ids'] ) # make every second token global snake_case_ = 1 return inputs
178
import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def __UpperCAmelCase ( a_ , a_ , a_ , a_ , a_): snake_case_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(a_)]) snake_case_ = np.array(a_) snake_case_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , a_)) , x.transpose()) , a_) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]) def __UpperCAmelCase ( a_ , a_ , a_): snake_case_ = (1, 2, 1) snake_case_ = (1, 1, 0, 7) snake_case_ = SARIMAX( a_ , exog=a_ , order=a_ , seasonal_order=a_) snake_case_ = model.fit(disp=a_ , maxiter=6_00 , method='nm') snake_case_ = model_fit.predict(1 , len(a_) , exog=[test_match]) return result[0] def __UpperCAmelCase ( a_ , a_ , a_): snake_case_ = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1) regressor.fit(a_ , a_) snake_case_ = regressor.predict(a_) return y_pred[0] def __UpperCAmelCase ( a_): train_user.sort() snake_case_ = np.percentile(a_ , 25) snake_case_ = np.percentile(a_ , 75) snake_case_ = qa - qa snake_case_ = qa - (iqr * 0.1) return low_lim def __UpperCAmelCase ( a_ , a_): snake_case_ = 0 snake_case_ = 0 for i in list_vote: if i > actual_result: snake_case_ = not_safe + 1 else: if abs(abs(a_) - abs(a_)) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) lowercase = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] lowercase = pd.DataFrame( data_input, columns=["total_user", "total_even", "days"] ) lowercase = Normalizer().fit_transform(data_input_df.values) # split data lowercase = normalize_df[:, 2].tolist() lowercase = normalize_df[:, 0].tolist() lowercase = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) lowercase = normalize_df[:, [1, 2]].tolist() lowercase = x[: len(x) - 1] lowercase = x[len(x) - 1 :] # for linear regression & sarimax lowercase = total_date[: len(total_date) - 1] lowercase = total_user[: len(total_user) - 1] lowercase = total_match[: len(total_match) - 1] lowercase = total_date[len(total_date) - 1 :] lowercase = total_user[len(total_user) - 1 :] lowercase = total_match[len(total_match) - 1 :] # voting system with forecasting lowercase = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data lowercase = "" if data_safety_checker(res_vote, tst_user) else "not " print(f"Today's data is {lowercase}safe.")
178
1
'''simple docstring''' from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('''socket.socket''' ) @patch('''builtins.open''' ) def a__ ( lowercase : Optional[int], lowercase : Optional[int] ) -> Any: """simple docstring""" _UpperCamelCase = Mock() _UpperCamelCase = conn, Mock() _UpperCamelCase = iter([1, None] ) _UpperCamelCase = lambda lowercase : next(lowercase ) # ===== invoke ===== send_file(filename='''mytext.txt''', testing=lowercase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
367
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def a__ ( lowercase : Tuple ) -> Dict: """simple docstring""" _UpperCamelCase = int(lowercase ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = t // 3600, (t // 60) % 60, t % 60 return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}""" def a__ ( lowercase : List[Any], lowercase : Dict, lowercase : Optional[int], lowercase : Union[str, Any], lowercase : Any=300 ) -> Any: """simple docstring""" return F""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """ def a__ ( lowercase : Optional[Any] ) -> Any: """simple docstring""" _UpperCamelCase = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F""" <th>{i}</th>\n""" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: _UpperCamelCase = F"""{elt:.6f}""" if isinstance(lowercase, lowercase ) else str(lowercase ) html_code += F""" <td>{elt}</td>\n""" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __lowerCAmelCase : """simple docstring""" _snake_case : str = 5 _snake_case : Optional[int] = 0.2 def __init__( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional["NotebookTrainingTracker"] = None , lowerCAmelCase__ : int = 300 , ) -> int: '''simple docstring''' _UpperCamelCase = total _UpperCamelCase = '''''' if prefix is None else prefix _UpperCamelCase = leave _UpperCamelCase = parent _UpperCamelCase = width _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None def snake_case__ ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : str = None ) -> Dict: '''simple docstring''' _UpperCamelCase = value if comment is not None: _UpperCamelCase = comment if self.last_value is None: _UpperCamelCase = _UpperCamelCase = time.time() _UpperCamelCase = _UpperCamelCase = value _UpperCamelCase = _UpperCamelCase = None _UpperCamelCase = self.warmup _UpperCamelCase = 1 self.update_bar(lowerCAmelCase__ ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 _UpperCamelCase = time.time() _UpperCamelCase = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: _UpperCamelCase = self.elapsed_time / (value - self.start_value) else: _UpperCamelCase = None if value >= self.total: _UpperCamelCase = self.total _UpperCamelCase = None if not self.leave: self.close() elif self.average_time_per_item is not None: _UpperCamelCase = self.average_time_per_item * (self.total - value) self.update_bar(lowerCAmelCase__ ) _UpperCamelCase = value _UpperCamelCase = current_time if self.average_time_per_item is None: _UpperCamelCase = 1 else: _UpperCamelCase = max(int(self.update_every / self.average_time_per_item ) , 1 ) def snake_case__ ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple=None ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ''' ''' * (len(str(self.total ) ) - len(str(lowerCAmelCase__ ) )) + str(lowerCAmelCase__ ) if self.elapsed_time is None: _UpperCamelCase = f"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: _UpperCamelCase = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}""" else: _UpperCamelCase = ( f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <""" f""" {format_time(self.predicted_remaining )}""" ) self.label += f""", {1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]""" self.display() def snake_case__ ( self : List[Any] ) -> Tuple: '''simple docstring''' _UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: _UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase__ ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = None if column_names is None else [column_names] _UpperCamelCase = None def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: _UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase__ ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : int ) -> Union[str, Any]: '''simple docstring''' if self.inner_table is None: _UpperCamelCase = [list(values.keys() ), list(values.values() )] else: _UpperCamelCase = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(lowerCAmelCase__ ) _UpperCamelCase = columns self.inner_table.append([values[c] for c in columns] ) def snake_case__ ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : List[str]=300 ) -> int: '''simple docstring''' _UpperCamelCase = NotebookProgressBar(lowerCAmelCase__ , prefix=lowerCAmelCase__ , 
parent=self , width=lowerCAmelCase__ ) return self.child_bar def snake_case__ ( self : Any ) -> str: '''simple docstring''' _UpperCamelCase = None self.display() class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = False def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , **lowerCAmelCase__ : Any ) -> Dict: '''simple docstring''' _UpperCamelCase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''' ) _UpperCamelCase = NotebookTrainingTracker(state.max_steps , lowerCAmelCase__ ) def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Dict ) -> Dict: '''simple docstring''' _UpperCamelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}""" self.training_tracker.update( state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , ) _UpperCamelCase = False def snake_case__ ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Dict ) -> Dict: '''simple docstring''' if not has_length(lowerCAmelCase__ ): return if self.prediction_bar is None: if self.training_tracker is not None: _UpperCamelCase = self.training_tracker.add_child(len(lowerCAmelCase__ ) ) else: _UpperCamelCase = NotebookProgressBar(len(lowerCAmelCase__ ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Any ) -> Optional[int]: '''simple docstring''' if self.prediction_bar is not None: self.prediction_bar.close() _UpperCamelCase = None def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : Optional[int] ) -> Tuple: '''simple docstring''' if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: _UpperCamelCase = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy _UpperCamelCase = state.global_step self.training_tracker.write_line(lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[str] ) -> List[str]: '''simple docstring''' if self.training_tracker is not None: _UpperCamelCase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: _UpperCamelCase = log['''loss'''] break if self.first_column == "Epoch": _UpperCamelCase = int(state.epoch ) else: _UpperCamelCase = state.global_step _UpperCamelCase = '''eval''' for k in metrics: if k.endswith('''_loss''' ): _UpperCamelCase = re.sub(r'''\_loss$''' , '''''' , lowerCAmelCase__ ) _UpperCamelCase = metrics.pop('''total_flos''' , 
lowerCAmelCase__ ) _UpperCamelCase = metrics.pop('''epoch''' , lowerCAmelCase__ ) _UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_runtime""" , lowerCAmelCase__ ) _UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , lowerCAmelCase__ ) _UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , lowerCAmelCase__ ) _UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , lowerCAmelCase__ ) for k, v in metrics.items(): if k == f"""{metric_key_prefix}_loss""": _UpperCamelCase = v else: _UpperCamelCase = k.split('''_''' ) _UpperCamelCase = ''' '''.join([part.capitalize() for part in splits[1:]] ) _UpperCamelCase = v self.training_tracker.write_line(lowerCAmelCase__ ) self.training_tracker.remove_child() _UpperCamelCase = None # Evaluation takes a long time so we should force the next update. _UpperCamelCase = True def snake_case__ ( self : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , **lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' self.training_tracker.update( state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=lowerCAmelCase__ ) _UpperCamelCase = None
287
0
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # The original also carried an `elif a % 15 == 0: result -= a` branch,
        # but it was unreachable: every multiple of 15 is a multiple of 3 and is
        # already counted exactly once by the check above.
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
205
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowercase_ = data_utils.TransfoXLTokenizer lowercase_ = data_utils.TransfoXLCorpus lowercase_ = data_utils lowercase_ = data_utils def a ( A__ : int , A__ : Dict , A__ : Union[str, Any] , A__ : Union[str, Any] ) -> List[str]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(A__ , 'rb' ) as fp: _lowercase =pickle.load(A__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) _lowercase =pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' ) _lowercase =corpus.vocab.__dict__ torch.save(A__ , A__ ) _lowercase =corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , A__ ) _lowercase =pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(A__ , A__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model _lowercase =os.path.abspath(A__ ) _lowercase =os.path.abspath(A__ ) print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": _lowercase =TransfoXLConfig() else: _lowercase =TransfoXLConfig.from_json_file(A__ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowercase =TransfoXLLMHeadModel(A__ ) _lowercase =load_tf_weights_in_transfo_xl(A__ , A__ , A__ ) # Save pytorch-model _lowercase =os.path.join(A__ , A__ ) _lowercase =os.path.join(A__ , A__ ) print(F'''Save PyTorch model to {os.path.abspath(A__ )}''' ) torch.save(model.state_dict() , A__ ) print(F'''Save configuration file to {os.path.abspath(A__ )}''' ) with open(A__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--tf_checkpoint_path', default='', type=str, help='An optional path to a TensorFlow checkpoint path to be converted.', ) parser.add_argument( '--transfo_xl_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--transfo_xl_dataset_file', default='', type=str, help='An optional dataset file to be converted in a vocabulary.', ) lowercase_ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
205
1
"""simple docstring""" def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" return x if y == 0 else greatest_common_divisor(_SCREAMING_SNAKE_CASE , x % y ) def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" return (x * y) // greatest_common_divisor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def a__ ( _SCREAMING_SNAKE_CASE = 20 ): """simple docstring""" UpperCamelCase = 1 for i in range(1 , n + 1 ): UpperCamelCase = lcm(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return g if __name__ == "__main__": print(f'''{solution() = }''')
350
"""simple docstring""" from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''T''') def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" return (position - 1) // 2 def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" return (2 * position) + 1 def a__ ( _SCREAMING_SNAKE_CASE ): """simple docstring""" return (2 * position) + 2 class _lowerCamelCase ( Generic[T] ): def __init__(self ) -> None: UpperCamelCase = [] UpperCamelCase = {} UpperCamelCase = 0 def __len__(self ) -> int: return self.elements def __repr__(self ) -> str: return str(self.heap ) def snake_case_ (self ) -> bool: # Check if the priority queue is empty return self.elements == 0 def snake_case_ (self , __a , __a ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) UpperCamelCase = self.elements self.elements += 1 self._bubble_up(__a ) def snake_case_ (self ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) UpperCamelCase , UpperCamelCase = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: UpperCamelCase , UpperCamelCase = self.heap[0] self._bubble_down(__a ) return elem def snake_case_ (self , __a , __a ) -> None: # Update the weight of the given key UpperCamelCase = self.position_map[elem] UpperCamelCase = (elem, weight) if position > 0: UpperCamelCase = get_parent_position(__a ) UpperCamelCase , UpperCamelCase = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__a ) else: self._bubble_down(__a ) else: self._bubble_down(__a ) def snake_case_ (self , __a ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] UpperCamelCase = self.position_map[elem] if curr_pos == 0: return None UpperCamelCase = get_parent_position(__a ) UpperCamelCase , UpperCamelCase = self.heap[curr_pos] UpperCamelCase , UpperCamelCase = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__a , __a ) return self._bubble_up(__a ) return None def snake_case_ (self , __a ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] UpperCamelCase = self.position_map[elem] UpperCamelCase , UpperCamelCase = self.heap[curr_pos] UpperCamelCase = get_child_left_position(__a ) UpperCamelCase = get_child_right_position(__a ) if child_left_position < self.elements and child_right_position < self.elements: UpperCamelCase , UpperCamelCase = self.heap[child_left_position] UpperCamelCase , UpperCamelCase = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__a , __a ) return self._bubble_down(__a ) if child_left_position < self.elements: UpperCamelCase , UpperCamelCase = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__a , __a ) return self._bubble_down(__a ) else: return None if child_right_position < self.elements: UpperCamelCase , UpperCamelCase = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__a , __a ) return self._bubble_down(__a ) return None def snake_case_ (self , __a , __a ) -> None: # Swap the nodes at the given positions UpperCamelCase = self.heap[nodea_pos][0] UpperCamelCase = self.heap[nodea_pos][0] UpperCamelCase , UpperCamelCase = ( self.heap[nodea_pos], self.heap[nodea_pos], ) UpperCamelCase = nodea_pos UpperCamelCase = nodea_pos class _lowerCamelCase ( 
Generic[T] ): def __init__(self ) -> None: UpperCamelCase = {} UpperCamelCase = 0 def __repr__(self ) -> str: return str(self.connections ) def __len__(self ) -> int: return self.nodes def snake_case_ (self , __a ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: UpperCamelCase = {} self.nodes += 1 def snake_case_ (self , __a , __a , __a ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__a ) self.add_node(__a ) UpperCamelCase = weight UpperCamelCase = weight def a__ ( _SCREAMING_SNAKE_CASE , ): """simple docstring""" UpperCamelCase = {node: maxsize for node in graph.connections} UpperCamelCase = {node: None for node in graph.connections} UpperCamelCase = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if priority_queue.is_empty(): return dist, parent # initialization UpperCamelCase = priority_queue.extract_min() UpperCamelCase = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: UpperCamelCase = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] ) UpperCamelCase = node # running prim's algorithm while not priority_queue.is_empty(): UpperCamelCase = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: UpperCamelCase = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] ) UpperCamelCase = node return dist, parent
244
0
"""simple docstring""" import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = {"""vocab_file""": """vocab.txt"""} __snake_case = { """vocab_file""": { """facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""", """facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""", }, } __snake_case = { """facebook/esm2_t6_8M_UR50D""": 1024, """facebook/esm2_t12_35M_UR50D""": 1024, } def __lowerCAmelCase ( lowercase : Optional[Any] ) -> Optional[Any]: """simple docstring""" with open(lowercase , "r" ) as f: snake_case : str = f.read().splitlines() return [l.strip() for l in lines] class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Any = VOCAB_FILES_NAMES __UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : List[str] = ["input_ids", "attention_mask"] def __init__( self , UpperCamelCase__ , UpperCamelCase__="<unk>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__="<eos>" , **UpperCamelCase__ , ) -> Optional[Any]: '''simple docstring''' super().__init__(**snake_case__ ) snake_case : List[Any] = load_vocab_file(snake_case__ ) snake_case : Dict = dict(enumerate(self.all_tokens ) ) snake_case : Union[str, Any] = {tok: ind for ind, tok in enumerate(self.all_tokens )} snake_case : Union[str, Any] = unk_token snake_case : int = cls_token snake_case : Any = pad_token snake_case : Union[str, Any] = mask_token snake_case : List[str] = eos_token snake_case : str = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' return self._id_to_token.get(snake_case__ , self.unk_token ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Any: '''simple docstring''' return self._token_to_id.get(snake_case__ , self._token_to_id.get(self.unk_token ) ) def lowerCamelCase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]: '''simple docstring''' return text.split() def lowerCamelCase ( self , UpperCamelCase__=False ) -> Dict: '''simple docstring''' return len(self._id_to_token ) def lowerCamelCase ( self ) -> str: '''simple docstring''' return {token: i for i, token in enumerate(self.all_tokens )} def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' return self._token_to_id.get(snake_case__ , self._token_to_id.get(self.unk_token ) ) def lowerCamelCase ( self , UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return self._id_to_token.get(snake_case__ , self.unk_token ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[Any]: '''simple docstring''' snake_case : Tuple = [self.cls_token_id] snake_case : Dict = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" 
) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> Any: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] snake_case : List[str] = [1] + ([0] * len(snake_case__ )) + [1] if token_ids_a is not None: mask += [0] * len(snake_case__ ) + [1] return mask def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' snake_case : Optional[int] = os.path.join(snake_case__ , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" ) with open(snake_case__ , "w" ) as f: f.write("\n".join(self.all_tokens ) ) return (vocab_file,) @property def lowerCamelCase ( self ) -> Any: '''simple docstring''' return self.get_vocab_size(with_added_tokens=snake_case__ ) def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Optional[int]: '''simple docstring''' return super()._add_tokens(snake_case__ , special_tokens=snake_case__ )
203
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil lowerCAmelCase__ = 100 lowerCAmelCase__ = set(range(3, NUM_PRIMES, 2)) primes.add(2) lowerCAmelCase__ = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_0_0 ) def a__ ( SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} lowerCAmelCase : set[int] = set() lowerCAmelCase : int lowerCAmelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def a__ ( SCREAMING_SNAKE_CASE : int = 5_0_0_0 ): '''simple docstring''' for number_to_partition in range(1 , SCREAMING_SNAKE_CASE ): if len(partition(SCREAMING_SNAKE_CASE ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"{solution() = }")
108
0
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
14
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCamelCase ( a_ , a_ ) -> Tuple: lowerCAmelCase_ = XCLIPTextConfig() # derive patch size from model name lowerCAmelCase_ = model_name.find('patch' ) lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ ) if "large" in model_name: lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 lowerCAmelCase_ = 12 lowerCAmelCase_ = 1_024 lowerCAmelCase_ = 4_096 lowerCAmelCase_ = 16 lowerCAmelCase_ = 24 lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 if model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = 336 lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ ) if "large" in model_name: lowerCAmelCase_ = 768 return config def lowerCamelCase ( a_ ) -> List[str]: # text encoder if name == "token_embedding.weight": lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: lowerCAmelCase_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: lowerCAmelCase_ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": lowerCAmelCase_ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): 
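# The original checkpoint stores this block as `prompts_generator.norm`;
# the HF implementation names it `prompts_generator.layernorm`, hence the rename.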
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def lowerCamelCase ( a_ , a_ ) -> Dict: for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(a_ ) if "attn.in_proj" in key: lowerCAmelCase_ = key.split('.' ) if key.startswith('visual' ): lowerCAmelCase_ = key_split[3] lowerCAmelCase_ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[ :dim ] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[ -dim: ] else: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] elif key.startswith('mit' ): lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.vision_config.mit_hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[dim : dim * 2, :] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.text_config.hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = rename_key(a_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: lowerCAmelCase_ = val.T lowerCAmelCase_ = val return orig_state_dict def lowerCamelCase ( a_ ) -> List[str]: if num_frames == 8: lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: lowerCAmelCase_ = 'eating_spaghetti.npy' elif num_frames == 32: lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy' lowerCAmelCase_ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , ) lowerCAmelCase_ = np.load(a_ ) return list(a_ ) def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]: lowerCAmelCase_ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 
'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } lowerCAmelCase_ = model_to_url[model_name] lowerCAmelCase_ = 8 if "16-frames" in model_name: lowerCAmelCase_ = 16 elif "shot" in model_name: lowerCAmelCase_ = 32 lowerCAmelCase_ = get_xclip_config(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) model.eval() if "drive" in checkpoint_url: lowerCAmelCase_ = 'pytorch_model.bin' gdown.cached_download(a_ , a_ , quiet=a_ ) lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model'] else: lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model'] lowerCAmelCase_ = convert_state_dict(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224 lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ ) lowerCAmelCase_ = prepare_video(a_ ) lowerCAmelCase_ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) # Verify outputs lowerCAmelCase_ = outputs.logits_per_video lowerCAmelCase_ = logits_per_video.softmax(dim=1 ) print('Probs:' , a_ ) # kinetics-400 if model_name == "xclip-base-patch32": lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(a_ , a_ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a_ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(a_ , organization='nielsr' ) processor.push_to_hub(a_ , organization='nielsr' ) slow_tokenizer.push_to_hub(a_ , organization='nielsr' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
14
1
def merge_sort(collection: list) -> list:
    '''simple docstring'''

    def merge(left: list, right: list) -> list:
        def _merge():
            # Repeatedly yield the smaller head element, then drain the leftovers.
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
107
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Lazily populate the module from _import_structure on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
42
0
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case : Tuple = logging.get_logger(__name__) __snake_case : Tuple = """▁""" __snake_case : Dict = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } __snake_case : Union[str, Any] = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } __snake_case : Union[str, Any] = { """facebook/s2t-small-librispeech-asr""": 10_24, } __snake_case : Optional[Any] = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] __snake_case : Optional[int] = {"""mustc""": MUSTC_LANGS} class __SCREAMING_SNAKE_CASE ( lowerCamelCase__): _SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Union[str, Any] = MAX_MODEL_INPUT_SIZES _SCREAMING_SNAKE_CASE : int = ["""input_ids""", """attention_mask"""] _SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<unk>" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , do_upper_case=snake_case__ , do_lower_case=snake_case__ , tgt_lang=snake_case__ , lang_codes=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) lowerCAmelCase__ = do_upper_case lowerCAmelCase__ = do_lower_case lowerCAmelCase__ = load_json(snake_case__ ) lowerCAmelCase__ = {v: k for k, v in self.encoder.items()} lowerCAmelCase__ = spm_file lowerCAmelCase__ = load_spm(snake_case__ , self.sp_model_kwargs ) if lang_codes is not None: lowerCAmelCase__ = lang_codes lowerCAmelCase__ = LANGUAGES[lang_codes] lowerCAmelCase__ = [F"<lang:{lang}>" for lang in self.langs] lowerCAmelCase__ = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs} lowerCAmelCase__ = self.lang_tokens lowerCAmelCase__ = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: lowerCAmelCase__ = {} @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.encoder ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = new_tgt_lang self.set_tgt_lang_special_tokens(snake_case__ ) def UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = self.lang_code_to_id[tgt_lang] lowerCAmelCase__ = [lang_code_id] def UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" return self.encoder.get(snake_case__ , self.encoder[self.unk_token] ) def 
UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" return self.decoder.get(snake_case__ , self.unk_token ) def UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = [] lowerCAmelCase__ = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: lowerCAmelCase__ = self.sp_model.decode(snake_case__ ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " lowerCAmelCase__ = [] else: current_sub_tokens.append(snake_case__ ) lowerCAmelCase__ = self.sp_model.decode(snake_case__ ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) lowerCAmelCase__ = [1] * len(self.prefix_tokens ) lowerCAmelCase__ = [1] if token_ids_a is None: return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCAmelCase__ = self.__dict__.copy() lowerCAmelCase__ = None return state def __setstate__( self , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase__ = {} lowerCAmelCase__ = load_spm(self.spm_file , self.sp_model_kwargs ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" lowerCAmelCase__ = Path(snake_case__ ) assert save_dir.is_dir(), F"{save_directory} should be a directory" lowerCAmelCase__ = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) lowerCAmelCase__ = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , snake_case__ ) if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , snake_case__ ) elif not os.path.isfile(self.spm_file ): with open(snake_case__ , 'wb' ) as fi: lowerCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (str(snake_case__ ), str(snake_case__ )) def _UpperCamelCase ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" lowerCAmelCase__ = sentencepiece.SentencePieceProcessor(**__lowerCAmelCase ) spm.Load(str(__lowerCAmelCase ) ) return spm def _UpperCamelCase ( UpperCamelCase_ : List[Any] ) -> Union[Dict, List]: """simple docstring""" with open(__lowerCAmelCase , 'r' ) as f: return json.load(__lowerCAmelCase ) def _UpperCamelCase ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple ) -> None: """simple 
docstring""" with open(__lowerCAmelCase , 'w' ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase , indent=2 )
362
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __snake_case : Any = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( __lowercase): _SCREAMING_SNAKE_CASE : Tuple = ['''input_features''', '''is_longer'''] def __init__( self , _UpperCamelCase=64 , _UpperCamelCase=4_80_00 , _UpperCamelCase=4_80 , _UpperCamelCase=10 , _UpperCamelCase=10_24 , _UpperCamelCase=0.0 , _UpperCamelCase=False , _UpperCamelCase = 0 , _UpperCamelCase = 1_40_00 , _UpperCamelCase = None , _UpperCamelCase = "fusion" , _UpperCamelCase = "repeatpad" , **_UpperCamelCase , ): """simple docstring""" super().__init__( feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , ) lowerCAmelCase__ = top_db lowerCAmelCase__ = truncation lowerCAmelCase__ = padding lowerCAmelCase__ = fft_window_size lowerCAmelCase__ = (fft_window_size >> 1) + 1 lowerCAmelCase__ = hop_length lowerCAmelCase__ = max_length_s lowerCAmelCase__ = max_length_s * sampling_rate lowerCAmelCase__ = sampling_rate lowerCAmelCase__ = frequency_min lowerCAmelCase__ = frequency_max lowerCAmelCase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm=_UpperCamelCase , mel_scale='htk' , ) lowerCAmelCase__ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm='slaney' , mel_scale='slaney' , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" lowerCAmelCase__ = spectrogram( _UpperCamelCase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_UpperCamelCase , log_mel='dB' , ) return log_mel_spectrogram.T def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase__ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase__ = [0] # randomly choose index for each part lowerCAmelCase__ = np.random.choice(ranges[0] ) lowerCAmelCase__ = np.random.choice(ranges[1] ) lowerCAmelCase__ = np.random.choice(ranges[2] ) lowerCAmelCase__ = mel[idx_front : idx_front + chunk_frames, :] lowerCAmelCase__ = mel[idx_middle : idx_middle + chunk_frames, :] lowerCAmelCase__ = mel[idx_back : idx_back + chunk_frames, :] lowerCAmelCase__ = torch.tensor(mel[None, None, :] ) lowerCAmelCase__ = torch.nn.functional.interpolate( _UpperCamelCase , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_UpperCamelCase ) lowerCAmelCase__ = 
mel_shrink[0][0].numpy() lowerCAmelCase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowerCAmelCase__ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCAmelCase__ = len(_UpperCamelCase ) - max_length lowerCAmelCase__ = np.random.randint(0 , overflow + 1 ) lowerCAmelCase__ = waveform[idx : idx + max_length] lowerCAmelCase__ = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCAmelCase__ = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters ) lowerCAmelCase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCAmelCase__ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCAmelCase__ = np.stack([mel, mel, mel, mel] , axis=0 ) lowerCAmelCase__ = False else: lowerCAmelCase__ = self._random_mel_fusion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) lowerCAmelCase__ = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented" ) else: lowerCAmelCase__ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCAmelCase__ = int(max_length / len(_UpperCamelCase ) ) lowerCAmelCase__ = np.stack(np.tile(_UpperCamelCase , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCAmelCase__ = int(max_length / len(_UpperCamelCase ) ) lowerCAmelCase__ = np.stack(np.tile(_UpperCamelCase , _UpperCamelCase ) ) lowerCAmelCase__ = np.pad(_UpperCamelCase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": lowerCAmelCase__ = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters ) lowerCAmelCase__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: lowerCAmelCase__ = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" lowerCAmelCase__ = truncation if truncation is not None else self.truncation lowerCAmelCase__ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowerCAmelCase__ = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) lowerCAmelCase__ = is_batched_numpy or ( isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase__ = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ): lowerCAmelCase__ = np.asarray(_UpperCamelCase , dtype=np.floataa ) elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase__ = [np.asarray(_UpperCamelCase )] # convert to mel spectrogram, truncate and pad if needed. lowerCAmelCase__ = [ self._get_input_mel(_UpperCamelCase , max_length if max_length else self.nb_max_samples , _UpperCamelCase , _UpperCamelCase ) for waveform in raw_speech ] lowerCAmelCase__ = [] lowerCAmelCase__ = [] for mel, longer in padded_inputs: input_mel.append(_UpperCamelCase ) is_longer.append(_UpperCamelCase ) if truncation == "fusion" and sum(_UpperCamelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCAmelCase__ = np.random.randint(0 , len(_UpperCamelCase ) ) lowerCAmelCase__ = True if isinstance(input_mel[0] , _UpperCamelCase ): lowerCAmelCase__ = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCAmelCase__ = [[longer] for longer in is_longer] lowerCAmelCase__ = {'input_features': input_mel, 'is_longer': is_longer} lowerCAmelCase__ = BatchFeature(_UpperCamelCase ) if return_tensors is not None: lowerCAmelCase__ = input_features.convert_to_tensors(_UpperCamelCase ) return input_features
122
0
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : str ): __lowercase = 3 __lowercase = 2_5_0 __lowercase = ids_tensor((batch_size, length), UpperCAmelCase__ ) __lowercase = torch.ones((batch_size, length), device=UpperCAmelCase__, dtype=torch.float ) / length return input_ids, scores def _lowercase ( self : List[Any] ): __lowercase ,__lowercase = self._get_tensors(5 ) __lowercase = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) def _lowercase ( self : Any ): __lowercase = MaxLengthCriteria(max_length=1_0 ) __lowercase ,__lowercase = self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) def _lowercase ( self : List[Any] ): __lowercase = MaxNewTokensCriteria(start_length=5, max_new_tokens=5 ) __lowercase ,__lowercase = self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length, 1_0 ) def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = self._get_tensors(5 ) __lowercase = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) def _lowercase ( self : int ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_0 ) with self.assertWarns(UpperCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_1 ) __lowercase = validate_stopping_criteria(StoppingCriteriaList(), 1_1 ) self.assertEqual(len(UpperCAmelCase__ ), 1 )
17
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Path , lowerCAmelCase : str = None , lowerCAmelCase : str = None , lowerCAmelCase : str = None , ): """simple docstring""" if config_name_or_path is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE_ : Dict = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = question_encoder_name_or_path SCREAMING_SNAKE_CASE_ : Union[str, Any] = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. SCREAMING_SNAKE_CASE_ : List[Any] = RagConfig.from_pretrained(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(lowerCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = gen_config SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config SCREAMING_SNAKE_CASE_ : Dict = model_class.from_pretrained_question_encoder_generator( lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase ) rag_model.save_pretrained(lowerCAmelCase ) # Sanity check. model_class.from_pretrained(lowerCAmelCase ) # Save tokenizers. SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": __lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) __lowerCamelCase : str = parser.parse_args() __lowerCamelCase : int = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
18
0
"""simple docstring""" import numpy as np class a_ : '''simple docstring''' def __init__(self ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = (0, 0) lowerCamelCase__ : Dict = None lowerCamelCase__ : Optional[Any] = 0 lowerCamelCase__ : Optional[int] = 0 lowerCamelCase__ : Dict = 0 def __eq__(self, lowerCamelCase_ ): '''simple docstring''' return self.position == cell.position def a__ (self ): '''simple docstring''' print(self.position ) class a_ : '''simple docstring''' def __init__(self, lowerCamelCase_=(5, 5) ): '''simple docstring''' lowerCamelCase__ : str = np.zeros(lowerCamelCase_ ) lowerCamelCase__ : Any = world_size[0] lowerCamelCase__ : Optional[int] = world_size[1] def a__ (self ): '''simple docstring''' print(self.w ) def a__ (self, lowerCamelCase_ ): '''simple docstring''' lowerCamelCase__ : str = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] lowerCamelCase__ : Dict = cell.position[0] lowerCamelCase__ : Dict = cell.position[1] lowerCamelCase__ : List[str] = [] for n in neughbour_cord: lowerCamelCase__ : str = current_x + n[0] lowerCamelCase__ : List[str] = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: lowerCamelCase__ : Union[str, Any] = Cell() lowerCamelCase__ : Any = (x, y) lowerCamelCase__ : List[str] = cell neighbours.append(lowerCamelCase_ ) return neighbours def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): lowerCamelCase__ : str = [] lowerCamelCase__ : str = [] _open.append(_lowerCamelCase ) while _open: lowerCamelCase__ : List[Any] = np.argmin([n.f for n in _open] ) lowerCamelCase__ : Optional[int] = _open[min_f] _closed.append(_open.pop(_lowerCamelCase ) ) if current == goal: break for n in world.get_neigbours(_lowerCamelCase ): for c in _closed: if c == n: continue lowerCamelCase__ : List[str] = current.g + 1 lowerCamelCase__ , lowerCamelCase__ : str = n.position lowerCamelCase__ , lowerCamelCase__ : Optional[int] = goal.position lowerCamelCase__ : Dict = (ya - ya) ** 2 + (xa - xa) ** 2 lowerCamelCase__ : str = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(_lowerCamelCase ) lowerCamelCase__ : Tuple = [] while current.parent is not None: path.append(current.position ) lowerCamelCase__ : List[str] = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": A_ : Optional[Any] = Gridworld() # Start position and goal A_ : Optional[Any] = Cell() A_ : Optional[Any] = (0, 0) A_ : Union[str, Any] = Cell() A_ : Union[str, Any] = (4, 4) print(f"path from {start.position} to {goal.position}") A_ : Tuple = astar(world, start, goal) # Just for visual reasons. for i in s: A_ : Any = 1 print(world.w)
316
"""simple docstring""" import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 A_ : Any = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 1_28, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class a_ ( unittest.TestCase ): '''simple docstring''' @classmethod def a__ (cls ): '''simple docstring''' lowerCamelCase__ : Tuple = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def a__ (cls ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-config' ) except HTTPError: pass def a__ (self ): '''simple docstring''' lowerCamelCase__ : Optional[int] = BertConfig( vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 ) config.push_to_hub('test-config', use_auth_token=self._token ) lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token, repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token ) lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : List[Any] = BertConfig( vocab_size=9_9, 
hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 ) config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token ) lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token, repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token ) lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) ) def a__ (self ): '''simple docstring''' CustomConfig.register_for_auto_class() lowerCamelCase__ : str = CustomConfig(attribute=4_2 ) config.push_to_hub('test-dynamic-config', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} ) lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__, 'CustomConfig' ) self.assertEqual(new_config.attribute, 4_2 ) class a_ ( unittest.TestCase ): '''simple docstring''' def a__ (self ): '''simple docstring''' lowerCamelCase__ : Tuple = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float lowerCamelCase__ : str = not c.scale_attn_weights # bool lowerCamelCase__ : Any = c.summary_type + 'foo' # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' ) self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' ) self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' ) self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : Any = PretrainedConfig() lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )] if len(lowerCamelCase_ ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' f''' {', '.join(lowerCamelCase_ )}.''' ) def a__ (self ): '''simple docstring''' with self.assertRaises(lowerCamelCase_ ): # config is in subfolder, the following should not work without specifying the subfolder lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' ) self.assertIsNotNone(lowerCamelCase_ ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : int = mock.Mock() lowerCamelCase__ : str = 5_0_0 lowerCamelCase__ : Union[str, Any] = {} lowerCamelCase__ : Any = HTTPError lowerCamelCase__ : str = {} # Download this model to make sure it's in the cache. lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head: lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def a__ (self ): '''simple docstring''' lowerCamelCase__ : int = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' ) lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCamelCase_ ) lowerCamelCase__ : Tuple = 2 json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ ) self.assertEqual(new_configuration.hidden_size, 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json'] lowerCamelCase__ : List[Any] = 7_6_8 configuration.save_pretrained(lowerCamelCase_ ) shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) ) lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ ) self.assertEqual(new_configuration.hidden_size, 7_6_8 ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs' import transformers as new_transformers lowerCamelCase__ : Dict = 'v4.0.0' lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ ) self.assertEqual(new_configuration.hidden_size, 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. 
self.assertDictEqual(lowerCamelCase_, {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers lowerCamelCase__ : Optional[Any] = 'v3.0.0' lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ ) self.assertEqual(old_configuration.hidden_size, 7_6_8 )
316
1
def uppercase_variants(txt: str) -> list[str]:
    """Return copies of ``txt`` with each alphabetic character, in turn, uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
223
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
223
1
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of ``input_string`` by spelling them out in a
    zigzag pattern over ``key`` rows (rail fence cipher)."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Recover the plaintext by rebuilding the zigzag template and reading it
    back in the original order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Return the decryption of ``input_string`` under every possible key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
246
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node data to its node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set containing ``data`` (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # union by rank helper
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two sets containing ``data1`` and ``data2``
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # map from a node to its neighbouring nodes (with edge weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: build a minimum spanning tree of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
246
1
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = LEDTokenizer lowerCAmelCase__ = LEDTokenizerFast lowerCAmelCase__ = True def lowercase_ ( self : int ): '''simple docstring''' super().setUp() UpperCAmelCase__ : List[Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase__ : Any = {'''unk_token''': '''<unk>'''} UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_A ) ) def lowercase_ ( self : Optional[int] , **_A : Any ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : Union[str, Any] , **_A : Optional[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : Tuple , _A : List[str] ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowercase_ ( self : List[Any] ): '''simple docstring''' return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def lowercase_ ( self : Any ): '''simple docstring''' return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase__ : Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase__ : Union[str, Any] = tokenizer(_A , max_length=len(_A ) , padding=_A , return_tensors='''pt''' ) self.assertIsInstance(_A , _A ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCAmelCase__ : int = batch.input_ids.tolist()[0] self.assertListEqual(_A , _A ) @require_torch def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase__ : List[str] = tokenizer(_A , padding=_A , return_tensors='''pt''' ) self.assertIn('''input_ids''' , _A ) self.assertIn('''attention_mask''' , _A ) self.assertNotIn('''labels''' , _A ) self.assertNotIn('''decoder_attention_mask''' , _A ) @require_torch 
def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase__ : Optional[Any] = tokenizer(text_target=_A , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def lowercase_ ( self : Tuple ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase__ : Any = tokenizer( ['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=_A , truncation=_A , return_tensors='''pt''' ) self.assertIsInstance(_A , _A ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = ['''A long paragraph for summarization.'''] UpperCAmelCase__ : List[Any] = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase__ : Optional[Any] = tokenizer(_A , return_tensors='''pt''' ) UpperCAmelCase__ : int = tokenizer(text_target=_A , return_tensors='''pt''' ) UpperCAmelCase__ : str = inputs['''input_ids'''] UpperCAmelCase__ : Tuple = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowercase_ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase__ : Tuple = ['''Summary of the text.''', '''Another summary.'''] UpperCAmelCase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] UpperCAmelCase__ : List[str] = tokenizer(_A , padding=_A ) UpperCAmelCase__ : str = [[0] * len(_A ) for x in encoded_output['''input_ids''']] UpperCAmelCase__ : Any = tokenizer.pad(_A ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass def lowercase_ ( self : Dict ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : Any = '''A, <mask> AllenNLP sentence.''' UpperCAmelCase__ : Dict = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A ) UpperCAmelCase__ : Optional[int] = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) UpperCAmelCase__ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) UpperCAmelCase__ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) 
self.assertSequenceEqual( _A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( _A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
181
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
181
1
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal line at the point of reflection
    normal_gradient = point_y / 4 / point_x
    s_2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c_2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s_2 - c_2 * incoming_gradient) / (c_2 + s_2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
287
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowercase__ : Any = logging.getLogger(__name__) def a__ ( lowercase : Optional[Any], lowercase : Tuple ) -> Any: """simple docstring""" return (preds == labels).mean() @dataclass class __lowerCAmelCase : """simple docstring""" _snake_case : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _snake_case : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __lowerCAmelCase : """simple docstring""" _snake_case : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) _snake_case : str = field(metadata={'help': 'Should contain the data files for the task.'} ) _snake_case : int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case : bool = field( default=__magic_name__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def a__ ( ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''', lowercase ) # Set seed set_seed(training_args.seed ) try: _UpperCamelCase = processors[data_args.task_name]() _UpperCamelCase = processor.get_labels() _UpperCamelCase = len(lowercase ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=lowercase, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) _UpperCamelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=lowercase, cache_dir=model_args.cache_dir, ) # Get datasets _UpperCamelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=lowercase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) _UpperCamelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=lowercase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def compute_metrics(lowercase : EvalPrediction ) -> Dict: _UpperCamelCase = np.argmax(p.predictions, axis=1 ) return {"acc": simple_accuracy(lowercase, p.label_ids )} # Data collator _UpperCamelCase = DataCollatorWithPadding(lowercase, pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _UpperCamelCase = Trainer( model=lowercase, args=lowercase, train_dataset=lowercase, eval_dataset=lowercase, compute_metrics=lowercase, data_collator=lowercase, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCamelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate() _UpperCamelCase = os.path.join(training_args.output_dir, '''eval_results.txt''' ) if trainer.is_world_master(): with 
open(lowercase, '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''', lowercase, lowercase ) writer.write('''%s = %s\n''' % (key, value) ) results.update(lowercase ) return results def a__ ( lowercase : Tuple ) -> List[Any]: """simple docstring""" main() if __name__ == "__main__": main()
287
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
14
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
1
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Folder paths and flip-type parameters
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Read images and YOLO annotations, flip them and save the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their bounding-box annotations from the input dirs."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip every image and mirror its bounding-box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
361
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowercase : def __init__( self , snake_case , snake_case=99 , snake_case=13 , snake_case=7 , snake_case=9 , snake_case=True , snake_case=True , snake_case=False , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case=8 , snake_case=0.1 , snake_case=0.0_02 , snake_case=1 , snake_case=0 , snake_case=0 , snake_case=None , snake_case=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = encoder_seq_length snake_case_ = decoder_seq_length # For common tests snake_case_ = self.decoder_seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = d_ff snake_case_ = relative_attention_num_buckets snake_case_ = dropout_rate snake_case_ = initializer_factor snake_case_ = eos_token_id snake_case_ = pad_token_id snake_case_ = decoder_start_token_id snake_case_ = None snake_case_ = decoder_layers def a ( self ): return TaConfig.from_pretrained('google/umt5-base' ) def a ( self , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ): if attention_mask is None: snake_case_ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: snake_case_ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: snake_case_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case ) if decoder_head_mask is None: snake_case_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case ) if cross_attn_head_mask is None: snake_case_ = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=snake_case ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def a ( self ): snake_case_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) snake_case_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input snake_case_ = input_ids.clamp(self.pad_token_id + 1 ) snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 ) snake_case_ = self.get_config() snake_case_ = config.num_attention_heads snake_case_ = self.prepare_inputs_dict(snake_case , snake_case , snake_case ) return config, input_dict def 
a ( self ): snake_case_ , snake_case_ = self.prepare_config_and_inputs() return config, inputs_dict def a ( self ): return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def a ( self ): return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): snake_case_ = UMTaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ = model( input_ids=snake_case , decoder_input_ids=snake_case , attention_mask=snake_case , decoder_attention_mask=snake_case , ) snake_case_ = model(input_ids=snake_case , decoder_input_ids=snake_case ) snake_case_ = result.last_hidden_state snake_case_ = result.past_key_values snake_case_ = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(snake_case ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): snake_case_ = UMTaModel(config=snake_case ).get_decoder().to(snake_case ).eval() # first forward pass snake_case_ = model(snake_case , use_cache=snake_case ) snake_case_ = model(snake_case ) snake_case_ = model(snake_case , use_cache=snake_case ) self.parent.assertTrue(len(snake_case ) == len(snake_case ) ) self.parent.assertTrue(len(snake_case ) == len(snake_case ) + 1 ) snake_case_ , snake_case_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case_ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ = model(snake_case )['last_hidden_state'] snake_case_ = model(snake_case , past_key_values=snake_case )['last_hidden_state'] # select random slice snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ = output_from_no_past[:, -1, random_slice_idx].detach() snake_case_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) ) def a ( self , snake_case , snake_case , ): 
snake_case_ = UMTaModel(config=snake_case ).to(snake_case ).half().eval() snake_case_ = model(**snake_case )['last_hidden_state'] self.parent.assertFalse(torch.isnan(snake_case ).any().item() ) @require_torch class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : int = (UMTaForConditionalGeneration,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[int] = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : List[str] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : Optional[int] = True __SCREAMING_SNAKE_CASE : Any = True # The small UMT5 model needs higher percentages for CPU/MP tests __SCREAMING_SNAKE_CASE : List[str] = [0.8, 0.9] def a ( self ): snake_case_ = UMTaModelTester(self ) @unittest.skip('Test has a segmentation fault on torch 1.8.0' ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() snake_case_ = UMTaModel(config_and_inputs[0] ).to(snake_case ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*snake_case ) def a ( self ): snake_case_ = ['encoder_attentions', 'decoder_attentions', 'cross_attentions'] snake_case_ = self.model_tester.prepare_config_and_inputs() snake_case_ = config_and_inputs[0] snake_case_ = UMTaForConditionalGeneration(snake_case ).eval() model.to(snake_case ) snake_case_ = { 'head_mask': torch.zeros(config.num_layers , config.num_heads , device=snake_case ), 'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ), 'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case ), } for attn_name, (name, mask) in zip(snake_case , head_masking.items() ): snake_case_ = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": snake_case_ = torch.ones( config.num_decoder_layers , config.num_heads , device=snake_case ) snake_case_ = model.generate( config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=snake_case , return_dict_in_generate=snake_case , **snake_case , ) # We check the state of decoder_attentions and cross_attentions just from the last step snake_case_ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' 
) def a ( self ): pass @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): @slow @unittest.skip( 'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' ) def a ( self ): snake_case_ = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=snake_case ).to(snake_case ) snake_case_ = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=snake_case , legacy=snake_case ) snake_case_ = [ 'Bonjour monsieur <extra_id_0> bien <extra_id_1>.', 'No se como puedo <extra_id_0>.', 'This is the reason why we <extra_id_0> them.', 'The <extra_id_0> walks in <extra_id_1>, seats', 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.', ] snake_case_ = tokenizer(snake_case , return_tensors='pt' , padding=snake_case ).input_ids # fmt: off snake_case_ = torch.tensor( [ [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(snake_case , snake_case ) snake_case_ = model.generate(input_ids.to(snake_case ) ) snake_case_ = [ '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>', '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>', ] snake_case_ = tokenizer.batch_decode(snake_case ) self.assertEqual(snake_case , snake_case )
200
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
329
1
def dodecahedron_surface_area(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
261
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
261
1
"""simple docstring""" import numpy as np class __lowerCAmelCase : def __init__( self ): '''simple docstring''' __UpperCamelCase = (0, 0) __UpperCamelCase = None __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = 0 def __eq__( self , __UpperCAmelCase ): '''simple docstring''' return self.position == cell.position def UpperCAmelCase ( self ): '''simple docstring''' print(self.position ) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase=(5, 5) ): '''simple docstring''' __UpperCamelCase = np.zeros(__UpperCAmelCase ) __UpperCamelCase = world_size[0] __UpperCamelCase = world_size[1] def UpperCAmelCase ( self ): '''simple docstring''' print(self.w ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] __UpperCamelCase = cell.position[0] __UpperCamelCase = cell.position[1] __UpperCamelCase = [] for n in neughbour_cord: __UpperCamelCase = current_x + n[0] __UpperCamelCase = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: __UpperCamelCase = Cell() __UpperCamelCase = (x, y) __UpperCamelCase = cell neighbours.append(__UpperCAmelCase ) return neighbours def A ( snake_case :Dict , snake_case :Tuple , snake_case :int ) -> Any: __UpperCamelCase = [] __UpperCamelCase = [] _open.append(snake_case ) while _open: __UpperCamelCase = np.argmin([n.f for n in _open] ) __UpperCamelCase = _open[min_f] _closed.append(_open.pop(snake_case ) ) if current == goal: break for n in world.get_neigbours(snake_case ): for c in _closed: if c == n: continue __UpperCamelCase = current.g + 1 __UpperCamelCase , __UpperCamelCase = n.position __UpperCamelCase , __UpperCamelCase = goal.position __UpperCamelCase = (ya - ya) ** 2 + (xa - xa) ** 2 __UpperCamelCase = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(snake_case ) __UpperCamelCase = [] while current.parent is not None: path.append(current.position ) __UpperCamelCase = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": UpperCamelCase : Optional[int] = Gridworld() # Start position and goal UpperCamelCase : List[Any] = Cell() UpperCamelCase : int = (0, 0) UpperCamelCase : str = Cell() UpperCamelCase : Optional[int] = (4, 4) print(f'''path from {start.position} to {goal.position}''') UpperCamelCase : Optional[Any] = astar(world, start, goal) # Just for visual reasons. for i in s: UpperCamelCase : Tuple = 1 print(world.w)
316
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase : Dict = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } UpperCamelCase : Dict = { "gpt2": 1_0_2_4, "gpt2-medium": 1_0_2_4, "gpt2-large": 1_0_2_4, "gpt2-xl": 1_0_2_4, "distilgpt2": 1_0_2_4, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["input_ids", "attention_mask"] lowercase = GPTaTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space: __UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**__UpperCAmelCase ) __UpperCamelCase = add_prefix_space def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: __UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
316
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
368
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
348
0
'''simple docstring'''

from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class UpperCAmelCase_ :
    """simple docstring"""

    lowercase = 42  # [batch_size x 3]
    lowercase = 42  # [batch_size x 3]
    lowercase = 42  # [batch_size x 3]
    lowercase = 42  # [batch_size x 3]
    lowercase = 42
    lowercase = 42
    lowercase = 42
    lowercase = 42
    lowercase = 42

    def lowerCamelCase ( self : Optional[Any] ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2

    def lowerCamelCase ( self : Union[str, Any] ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )

    def lowerCamelCase ( self : str ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )

    def lowerCamelCase ( self : List[str] ):
        snake_case__ : str = torch.arange(self.height * self.width )
        snake_case__ : Optional[Any] = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(snake_case_ , self.width , rounding_mode="""trunc""" ),
            ] ,
            axis=1 ,
        )
        return coords

    @property
    def lowerCamelCase ( self : Tuple ):
        snake_case__ , *snake_case__ : List[Any] = self.shape
        snake_case__ : str = int(np.prod(snake_case_ ) )
        snake_case__ : List[Any] = self.get_image_coords()
        snake_case__ : Tuple = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        snake_case__ : int = self.get_camera_rays(snake_case_ )
        snake_case__ : Tuple = rays.view(snake_case_ , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays

    def lowerCamelCase ( self : Any , snake_case_ : torch.Tensor ):
        snake_case__ , *snake_case__ , snake_case__ : Union[str, Any] = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        snake_case__ : int = coords.view(snake_case_ , -1 , 2 )
        snake_case__ : Dict = self.resolution()
        snake_case__ : List[str] = self.fov()
        snake_case__ : Union[str, Any] = (flat.float() / (res - 1)) * 2 - 1
        snake_case__ : str = fracs * torch.tan(fov / 2 )
        snake_case__ : int = fracs.view(snake_case_ , -1 , 2 )
        snake_case__ : List[str] = (
            self.z.view(snake_case_ , 1 , 3 )
            + self.x.view(snake_case_ , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(snake_case_ , 1 , 3 ) * fracs[:, :, 1:]
        )
        snake_case__ : str = directions / directions.norm(dim=-1 , keepdim=snake_case_ )
        snake_case__ : Dict = torch.stack(
            [
                torch.broadcast_to(self.origin.view(snake_case_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] ,
            dim=2 ,
        )
        return rays.view(snake_case_ , *snake_case_ , 2 , 3 )

    def lowerCamelCase ( self : Union[str, Any] , snake_case_ : int , snake_case_ : int ):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin ,
            x=self.x ,
            y=self.y ,
            z=self.z ,
            width=snake_case_ ,
            height=snake_case_ ,
            x_fov=self.x_fov ,
            y_fov=self.y_fov ,
        )


def __snake_case( _lowerCAmelCase ) -> DifferentiableProjectiveCamera:
    snake_case__ : Union[str, Any] = []
    snake_case__ : int = []
    snake_case__ : List[Any] = []
    snake_case__ : Tuple = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        snake_case__ : Any = np.array([np.sin(_lowerCAmelCase ), np.cos(_lowerCAmelCase ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        snake_case__ : Optional[int] = -z * 4
        snake_case__ : List[str] = np.array([np.cos(_lowerCAmelCase ), -np.sin(_lowerCAmelCase ), 0.0] )
        snake_case__ : Optional[int] = np.cross(_lowerCAmelCase , _lowerCAmelCase )
        origins.append(_lowerCAmelCase )
        xs.append(_lowerCAmelCase )
        ys.append(_lowerCAmelCase )
        zs.append(_lowerCAmelCase )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(_lowerCAmelCase , axis=0 ) ).float() ,
        x=torch.from_numpy(np.stack(_lowerCAmelCase , axis=0 ) ).float() ,
        y=torch.from_numpy(np.stack(_lowerCAmelCase , axis=0 ) ).float() ,
        z=torch.from_numpy(np.stack(_lowerCAmelCase , axis=0 ) ).float() ,
        width=_lowerCAmelCase ,
        height=_lowerCAmelCase ,
        x_fov=0.7 ,
        y_fov=0.7 ,
        shape=(1, len(_lowerCAmelCase )) ,
    )
35
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
55
0
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = BarthezTokenizer __UpperCamelCase = BarthezTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' super().setUp() A_ : int = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case ) A_ : int = tokenizer def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Any = "<pad>" A_ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(snake_case ) , 101_122 ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101_122 ) @require_torch def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] A_ : Any = [0, 57, 3_018, 70_307, 91, 2] A_ : Union[str, Any] = self.tokenizer( snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors="pt" ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) A_ : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' if not self.test_rust_tokenizer: return A_ : int = self.get_tokenizer() A_ : Any = self.get_rust_tokenizer() A_ : Tuple = "I was born in 92000, and this is falsé." 
A_ : Dict = tokenizer.tokenize(snake_case ) A_ : Optional[Any] = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) A_ : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case ) A_ : Any = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) A_ : Dict = self.get_rust_tokenizer() A_ : Optional[int] = tokenizer.encode(snake_case ) A_ : Any = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) @slow def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : Tuple = {"input_ids": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. A_ : Optional[Any] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=snake_case , )
354
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
70
0
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
41
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ : List[Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCamelCase__ : int = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), 
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int, _lowerCAmelCase : Optional[int] ) -> Dict: _UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = val def UpperCamelCase ( _lowerCAmelCase : List[Any] ) -> List[str]: _UpperCAmelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _UpperCAmelCase : Tuple = key.replace("""backbone.0.body""", """backbone.conv_encoder.model""" ) _UpperCAmelCase : Any = value else: _UpperCAmelCase : List[Any] = value return new_state_dict def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Tuple=False ) -> Optional[Any]: _UpperCAmelCase : int = """""" if is_panoptic: _UpperCAmelCase : str = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _UpperCAmelCase : Dict = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) _UpperCAmelCase : Union[str, Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase : Any = in_proj_weight[:256, :] _UpperCAmelCase : Tuple = in_proj_bias[:256] _UpperCAmelCase : Optional[int] = in_proj_weight[256:512, :] _UpperCAmelCase : str = in_proj_bias[256:512] _UpperCAmelCase : int = in_proj_weight[-256:, :] _UpperCAmelCase : List[Any] = in_proj_bias[-256:] def UpperCamelCase ( ) -> Any: _UpperCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase : Dict = Image.open(requests.get(_lowerCAmelCase, stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( 
_lowerCAmelCase : Tuple, _lowerCAmelCase : Any ) -> List[Any]: _UpperCAmelCase : str = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: _UpperCAmelCase : Dict = """resnet101""" if "dc5" in model_name: _UpperCAmelCase : Union[str, Any] = True _UpperCAmelCase : Optional[Any] = """panoptic""" in model_name if is_panoptic: _UpperCAmelCase : Optional[int] = 250 else: _UpperCAmelCase : str = 91 _UpperCAmelCase : Optional[int] = """huggingface/label-files""" _UpperCAmelCase : str = """coco-detection-id2label.json""" _UpperCAmelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase, _lowerCAmelCase, repo_type="""dataset""" ), """r""" ) ) _UpperCAmelCase : Union[str, Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} _UpperCAmelCase : List[str] = idalabel _UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} # load image processor _UpperCAmelCase : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection""" _UpperCAmelCase : int = ConditionalDetrImageProcessor(format=_lowerCAmelCase ) # prepare image _UpperCAmelCase : List[str] = prepare_img() _UpperCAmelCase : Any = image_processor(images=_lowerCAmelCase, return_tensors="""pt""" ) _UpperCAmelCase : Any = encoding["""pixel_values"""] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub _UpperCAmelCase : Tuple = torch.hub.load("""DeppMeng/ConditionalDETR""", _lowerCAmelCase, pretrained=_lowerCAmelCase ).eval() _UpperCAmelCase : Tuple = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: _UpperCAmelCase : Optional[int] = """conditional_detr.""" + src rename_key(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = rename_backbone_keys(_lowerCAmelCase ) # query, key and value matrices need special treatment read_in_q_k_v(_lowerCAmelCase, is_panoptic=_lowerCAmelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _UpperCAmelCase : List[str] = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): _UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Any = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _UpperCAmelCase : Optional[Any] = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: _UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Optional[int] = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase ) _UpperCAmelCase : Any = val # finally, create HuggingFace model and load state dict _UpperCAmelCase : Union[str, Any] = ConditionalDetrForSegmentation(_lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) model.eval() model.push_to_hub(repo_id=_lowerCAmelCase, organization="""DepuMeng""", commit_message="""Add model""" ) # verify our conversion _UpperCAmelCase : Any = conditional_detr(_lowerCAmelCase ) _UpperCAmelCase : int = model(_lowerCAmelCase ) assert torch.allclose(outputs.logits, 
original_outputs["""pred_logits"""], atol=1E-4 ) assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1E-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": lowerCamelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase__ : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
246
0
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights lowercase__: List[str] = FlaxDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_UpperCAmelCase , cache_dir=_UpperCAmelCase ) lowercase__: List[str] = [t[-1] for t in os.walk(os.path.join(_UpperCAmelCase , os.listdir(_UpperCAmelCase )[0] , '''snapshots''' ) )] lowercase__: int = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('''.bin''' ) for f in files ) @slow @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__, lowercase__: Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_UpperCAmelCase ) lowercase__: List[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: int = jax.random.PRNGKey(0 ) lowercase__: Tuple = 4 lowercase__: List[Any] = jax.device_count() lowercase__: Optional[Any] = num_samples * [prompt] lowercase__: Dict = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: List[str] = replicate(_UpperCAmelCase ) lowercase__: List[str] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = shard(_UpperCAmelCase ) lowercase__: List[Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3 assert np.abs(np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1 lowercase__: List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_UpperCAmelCase ) == num_samples def _snake_case ( self ): lowercase__, lowercase__: Any = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=_UpperCAmelCase ) lowercase__: Optional[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: List[Any] = jax.random.PRNGKey(0 ) lowercase__: int = 50 lowercase__: str = jax.device_count() lowercase__: int = num_samples * [prompt] lowercase__: Any = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: int = replicate(_UpperCAmelCase ) lowercase__: Dict = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = shard(_UpperCAmelCase ) lowercase__: Tuple = pipeline(_UpperCAmelCase , 
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1 def _snake_case ( self ): lowercase__, lowercase__: List[str] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase ) lowercase__: Union[str, Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: List[Any] = jax.random.PRNGKey(0 ) lowercase__: Optional[Any] = 50 lowercase__: Optional[int] = jax.device_count() lowercase__: List[str] = num_samples * [prompt] lowercase__: Any = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: Optional[int] = replicate(_UpperCAmelCase ) lowercase__: Any = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[str] = shard(_UpperCAmelCase ) lowercase__: str = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1 def _snake_case ( self ): lowercase__, lowercase__: List[str] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa ) lowercase__: List[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: List[str] = jax.random.PRNGKey(0 ) lowercase__: List[Any] = 50 lowercase__: List[Any] = jax.device_count() lowercase__: Optional[int] = num_samples * [prompt] lowercase__: Tuple = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: Any = replicate(_UpperCAmelCase ) lowercase__: int = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = shard(_UpperCAmelCase ) lowercase__: Any = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1 def _snake_case ( self ): lowercase__: Optional[int] = FlaxDDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , ) lowercase__, lowercase__: List[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , ) lowercase__: List[Any] = scheduler.create_state() lowercase__: Tuple = scheduler_state lowercase__: Optional[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: str = 
jax.random.PRNGKey(0 ) lowercase__: Dict = 50 lowercase__: Tuple = jax.device_count() lowercase__: str = num_samples * [prompt] lowercase__: Dict = pipeline.prepare_inputs(_UpperCAmelCase ) # shard inputs and rng lowercase__: Union[str, Any] = replicate(_UpperCAmelCase ) lowercase__: str = jax.random.split(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = shard(_UpperCAmelCase ) lowercase__: List[str] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3 assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1 def _snake_case ( self ): lowercase__: Optional[Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__: str = jax.device_count() lowercase__: List[str] = num_samples * [prompt] lowercase__: Any = jax.random.split(jax.random.PRNGKey(0 ) , _UpperCAmelCase ) lowercase__, lowercase__: Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , ) lowercase__: List[str] = replicate(_UpperCAmelCase ) lowercase__: str = pipeline.prepare_inputs(_UpperCAmelCase ) lowercase__: Any = shard(_UpperCAmelCase ) lowercase__: Union[str, Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) lowercase__: Optional[int] = images[2, 0, 256, 10:17, 1] # With memory efficient attention lowercase__, lowercase__: Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , use_memory_efficient_attention=_UpperCAmelCase , ) lowercase__: Optional[Any] = replicate(_UpperCAmelCase ) lowercase__: int = pipeline.prepare_inputs(_UpperCAmelCase ) lowercase__: Optional[Any] = shard(_UpperCAmelCase ) lowercase__: List[str] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) lowercase__: List[Any] = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
2
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "beit" def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) lowercase__: Union[str, Any] = vocab_size lowercase__: List[Any] = hidden_size lowercase__: Optional[int] = num_hidden_layers lowercase__: Optional[int] = num_attention_heads lowercase__: int = intermediate_size lowercase__: List[str] = hidden_act lowercase__: List[Any] = hidden_dropout_prob lowercase__: Dict = attention_probs_dropout_prob lowercase__: List[str] = initializer_range lowercase__: Optional[int] = layer_norm_eps lowercase__: int = image_size lowercase__: Tuple = patch_size lowercase__: int = num_channels lowercase__: Optional[Any] = use_mask_token lowercase__: List[Any] = use_absolute_position_embeddings lowercase__: Optional[int] = use_relative_position_bias lowercase__: Optional[int] = use_shared_relative_position_bias lowercase__: Optional[Any] = layer_scale_init_value lowercase__: Union[str, Any] = drop_path_rate lowercase__: Tuple = use_mean_pooling # decode head attributes (semantic segmentation) lowercase__: Tuple = out_indices lowercase__: Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) lowercase__: List[str] = use_auxiliary_head lowercase__: Optional[Any] = auxiliary_loss_weight lowercase__: str = auxiliary_channels lowercase__: List[str] = auxiliary_num_convs lowercase__: Tuple = auxiliary_concat_input lowercase__: Dict = semantic_loss_ignore_index class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = version.parse("1.11" ) @property def _snake_case ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _snake_case ( self ): return 1e-4
2
1
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
63
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
163
0
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCamelCase__ ( self ): """simple docstring""" # A mock response for an HTTP head request to emulate server down lowerCAmelCase__ = mock.Mock() lowerCAmelCase__ = 5_00 lowerCAmelCase__ = {} lowerCAmelCase__ = HTTPError lowerCAmelCase__ = {} # Download this model to make sure it's in the cache. lowerCAmelCase__ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=__snake_case ) as mock_head: lowerCAmelCase__ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def UpperCamelCase__ ( self ): """simple docstring""" # A mock response for an HTTP head request to emulate server down lowerCAmelCase__ = mock.Mock() lowerCAmelCase__ = 5_00 lowerCAmelCase__ = {} lowerCAmelCase__ = HTTPError lowerCAmelCase__ = {} # Download this model to make sure it's in the cache. lowerCAmelCase__ = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=__snake_case ) as mock_head: lowerCAmelCase__ = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase__ ( self ): """simple docstring""" # This test is for deprecated behavior and can be removed in v5 try: lowerCAmelCase__ = tempfile.mktemp() with open(__snake_case , 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , __snake_case ) lowerCAmelCase__ = AlbertTokenizer.from_pretrained(__snake_case ) finally: os.remove(__snake_case ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('tokenizer.json' , 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , __snake_case ) lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 10_00 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('tokenizer.json' ) def UpperCamelCase__ ( self ): """simple docstring""" # This test is for deprecated behavior and can be removed in v5 lowerCAmelCase__ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase): _SCREAMING_SNAKE_CASE : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def UpperCamelCase__ ( cls ): """simple docstring""" lowerCAmelCase__ = TOKEN HfFolder.save_token(__snake_case ) @classmethod def UpperCamelCase__ ( cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__snake_case , 'vocab.txt' ) with open(__snake_case , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase__ = BertTokenizer(__snake_case ) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token ) lowerCAmelCase__ = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__snake_case , repo_id='test-tokenizer' , push_to_hub=__snake_case , use_auth_token=self._token ) lowerCAmelCase__ = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__snake_case , 'vocab.txt' ) with open(__snake_case , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase__ = BertTokenizer(__snake_case ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token ) lowerCAmelCase__ = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( __snake_case , repo_id='valid_org/test-tokenizer-org' , push_to_hub=__snake_case , use_auth_token=self._token ) lowerCAmelCase__ = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def UpperCamelCase__ ( self ): """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__snake_case , 'vocab.txt' ) with open(__snake_case , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase__ = CustomTokenizer(__snake_case ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) lowerCAmelCase__ = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , 
trust_remote_code=__snake_case ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__snake_case , 'vocab.txt' ) with open(__snake_case , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase__ = BertTokenizerFast.from_pretrained(__snake_case ) bert_tokenizer.save_pretrained(__snake_case ) lowerCAmelCase__ = CustomTokenizerFast.from_pretrained(__snake_case ) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) lowerCAmelCase__ = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=__snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' ) lowerCAmelCase__ = AutoTokenizer.from_pretrained( F"{USER}/test-dynamic-tokenizer" , use_fast=__snake_case , trust_remote_code=__snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] ) def UpperCamelCase__ ( self ): """simple docstring""" # Even if the offsets are wrong, we necessarily output correct string # parts. lowerCAmelCase__ = Trie() lowerCAmelCase__ = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(__snake_case , ['AB', 'C'] )
353
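The Trie tests above exercise transformers.tokenization_utils.Trie, the structure used to split text on added special tokens. A minimal standalone sketch mirroring the asserted behaviour:

from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
# Unmatched text between recognised tokens is kept as-is.
print(trie.split("[CLS] This is a extra_id_100"))
# ['[CLS]', ' This is a ', 'extra_id_100']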
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # Linear probing: step to the next bucket, wrapping around.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
122
0
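A small sanity check of the open-addressing map above (names per the cleaned-up version; the `in` test comes for free from the MutableMapping mixin):

hm = HashMap()   # 8 buckets initially, doubles at 75% load
hm["a"] = 1
hm["b"] = 2
hm["a"] = 3      # overwrite in place, length stays 2
assert len(hm) == 2 and hm["a"] == 3
del hm["b"]      # slot is tombstoned with _deleted
assert "b" not in hm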
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every ordered triple of elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then use the two-pointer technique for each anchor element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
325
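To see the two-pointer version in action on a fixed input (values are illustrative):

arr, target = sorted([13, 29, 7, 23, 5]), 35
# Anchor 5, then scan 7..29 inward: 5 + 7 + 23 == 35.
print(triplet_sum2(arr, target))  # (5, 7, 23)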
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Tuple = "mask2former" lowerCAmelCase__ : List[Any] = ["swin"] lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"} def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int: """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __lowercase = CONFIG_MAPPING['swin']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = backbone_config.pop('model_type' ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {",".join(self.backbones_supported )}""" ) __lowercase = backbone_config __lowercase = feature_size __lowercase = mask_feature_size __lowercase = hidden_dim __lowercase = encoder_feedforward_dim __lowercase = activation_function __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = num_attention_heads __lowercase = dropout __lowercase = dim_feedforward __lowercase = pre_norm __lowercase = enforce_input_projection __lowercase = common_stride __lowercase = ignore_value __lowercase = num_queries __lowercase = no_object_weight __lowercase = class_weight __lowercase = mask_weight __lowercase = dice_weight __lowercase = train_num_points __lowercase = oversample_ratio __lowercase = importance_sample_ratio __lowercase = init_std __lowercase = init_xavier_std __lowercase = use_auxiliary_loss __lowercase = feature_strides __lowercase = output_auxiliary_logits __lowercase = decoder_layers super().__init__(**_UpperCAmelCase ) @classmethod def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" return cls( backbone_config=_UpperCAmelCase , **_UpperCAmelCase , ) def a__ ( self : str ) -> Dict[str, any]: """simple docstring""" __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output
325
1
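The class above is transformers' Mask2FormerConfig. A hedged instantiation sketch, assuming the public transformers export; when backbone_config is None the default Swin backbone is filled in, as the __init__ above shows:

from transformers import Mask2FormerConfig

config = Mask2FormerConfig(num_queries=100, hidden_dim=256)
print(config.backbone_config.model_type)  # "swin", the default backbone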
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # Return plain floats when the roots are real, complex numbers otherwise.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
353
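Worked check of x = (-b ± sqrt(b² - 4ac)) / 2a with the coefficients used in main above:

# 5x^2 + 6x + 1 = 0 factors as (5x + 1)(x + 1) = 0.
print(quadratic_roots(a=5, b=6, c=1))  # (-0.2, -1.0)
# Complex roots are returned as complex rather than truncated:
print(quadratic_roots(a=1, b=0, c=1))  # (1j, -1j)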
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """Return a peak element of lst via divide and conquer (O(log n))."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
84
0
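The recursion halves the search range on each call, so a peak is found in O(log n):

# 1 < 3 < 5 > 4 > 2: the middle window [3, 5, 4] already contains the peak.
print(peak([1, 3, 5, 4, 2]))  # 5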
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal a :str = logging.get_logger(__name__) a :List[Any] = TypeVar("DatasetType", Dataset, IterableDataset) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "first_exhausted" , ) -> DatasetType: from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("""Unable to interleave an empty list of datasets.""" ) for i, dataset in enumerate(__lowerCAmelCase ): if not isinstance(__lowerCAmelCase , (Dataset, IterableDataset) ): if isinstance(__lowerCAmelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' """is an empty dataset dictionary.""" ) raise ValueError( F'''Dataset at position {i} has at least one split: {list(__lowerCAmelCase )}\n''' F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowerCAmelCase ) )}\']''' ) raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowerCAmelCase ).__name__}.''' ) if i == 0: SCREAMING_SNAKE_CASE__ : Optional[int] = ( (Dataset, IterableDataset) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F'''{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.''' ) if dataset_type is Dataset: return _interleave_map_style_datasets( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , stopping_strategy=__lowerCAmelCase ) else: return _interleave_iterable_datasets( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , stopping_strategy=__lowerCAmelCase ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0 , ) -> DatasetType: if not dsets: raise ValueError("""Unable to concatenate an empty list of datasets.""" ) for i, dataset in enumerate(__lowerCAmelCase ): if not isinstance(__lowerCAmelCase , (Dataset, IterableDataset) ): if isinstance(__lowerCAmelCase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' """is an empty dataset dictionary.""" ) raise ValueError( F'''Dataset at position {i} has at least one split: {list(__lowerCAmelCase )}\n''' F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowerCAmelCase ) )}\']''' ) raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowerCAmelCase ).__name__}.''' ) if i == 0: SCREAMING_SNAKE_CASE__ : str = ( (Dataset, IterableDataset) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError( F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , axis=__lowerCAmelCase ) else: return _concatenate_iterable_datasets(__lowerCAmelCase , info=__lowerCAmelCase , split=__lowerCAmelCase , axis=__lowerCAmelCase )
132
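A usage sketch from the caller's side, via the public datasets API rather than the internal module above:

from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})
# Round-robin until the first dataset is exhausted (the default strategy).
mixed = interleave_datasets([d1, d2])
print(mixed["x"])  # [0, 10, 1, 11, 2, 12]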
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) # TODO Update this __snake_case = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class __snake_case ( lowerCamelCase__ ): __lowerCamelCase : Tuple = """esm""" def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1026 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=snake_case__ , mask_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase : List[str] =vocab_size UpperCAmelCase : str =hidden_size UpperCAmelCase : List[Any] =num_hidden_layers UpperCAmelCase : Optional[Any] =num_attention_heads UpperCAmelCase : str =intermediate_size UpperCAmelCase : Any =hidden_dropout_prob UpperCAmelCase : int =attention_probs_dropout_prob UpperCAmelCase : Dict =max_position_embeddings UpperCAmelCase : List[str] =initializer_range UpperCAmelCase : Union[str, Any] =layer_norm_eps UpperCAmelCase : Dict =position_embedding_type UpperCAmelCase : Optional[Any] =use_cache UpperCAmelCase : int =emb_layer_norm_before UpperCAmelCase : List[str] =token_dropout UpperCAmelCase : Optional[Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) UpperCAmelCase : Optional[Any] =EsmFoldConfig() elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase : Optional[int] =EsmFoldConfig(**snake_case__ ) UpperCAmelCase : Tuple =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) UpperCAmelCase : Any =get_default_vocab_list() else: UpperCAmelCase : Tuple =vocab_list else: UpperCAmelCase : Optional[int] =None UpperCAmelCase : Union[str, Any] =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , snake_case__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , snake_case__ ): UpperCAmelCase : str =self.esmfold_config.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : str = None __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : bool = False __lowerCamelCase : float = 0 __lowerCamelCase : bool = True __lowerCamelCase : bool = False __lowerCamelCase : int = 128 __lowerCamelCase : "TrunkConfig" = None def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' if self.trunk is None: UpperCAmelCase : str =TrunkConfig() elif isinstance(self.trunk , snake_case__ ): UpperCAmelCase : Optional[int] =TrunkConfig(**self.trunk ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =asdict(self ) UpperCAmelCase : Any =self.trunk.to_dict() return output @dataclass class __snake_case : 
__lowerCamelCase : int = 48 __lowerCamelCase : int = 1024 __lowerCamelCase : int = 128 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : int = 32 __lowerCamelCase : float = 0 __lowerCamelCase : float = 0 __lowerCamelCase : bool = False __lowerCamelCase : int = 4 __lowerCamelCase : Optional[int] = 128 __lowerCamelCase : "StructureModuleConfig" = None def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' if self.structure_module is None: UpperCAmelCase : Any =StructureModuleConfig() elif isinstance(self.structure_module , snake_case__ ): UpperCAmelCase : str =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) UpperCAmelCase : Optional[int] =self.sequence_state_dim // self.sequence_head_width UpperCAmelCase : Any =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =asdict(self ) UpperCAmelCase : Tuple =self.structure_module.to_dict() return output @dataclass class __snake_case : __lowerCamelCase : int = 384 __lowerCamelCase : int = 128 __lowerCamelCase : int = 16 __lowerCamelCase : int = 128 __lowerCamelCase : int = 12 __lowerCamelCase : int = 4 __lowerCamelCase : int = 8 __lowerCamelCase : float = 0.1 __lowerCamelCase : int = 8 __lowerCamelCase : int = 1 __lowerCamelCase : int = 2 __lowerCamelCase : int = 7 __lowerCamelCase : int = 10 __lowerCamelCase : float = 1E-8 __lowerCamelCase : float = 1E5 def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' return asdict(self ) def lowerCAmelCase_ ( )-> Tuple: '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
348
0
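The class above is transformers' EsmConfig; a minimal construction sketch (the folding-model branch with EsmFoldConfig only activates when is_folding_model=True):

from transformers import EsmConfig

cfg = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6)
print(cfg.model_type)  # "esm"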
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} UpperCAmelCase__ = { """vocab_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), }, """merges_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), }, } UpperCAmelCase__ = { """allenai/longformer-base-4096""": 4_0_9_6, """allenai/longformer-large-4096""": 4_0_9_6, """allenai/longformer-large-4096-finetuned-triviaqa""": 4_0_9_6, """allenai/longformer-base-4096-extra.pos.embd.only""": 4_0_9_6, """allenai/longformer-large-4096-extra.pos.embd.only""": 4_0_9_6, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ( list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) ) ) _UpperCAmelCase = bs[:] _UpperCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 _UpperCAmelCase = [chr(lowercase ) for n in cs] return dict(zip(lowercase ,lowercase ) ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = set() _UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCAmelCase = char return pairs class a ( lowerCAmelCase_ ): _snake_case : Optional[Any] = VOCAB_FILES_NAMES _snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : List[Any] = ['input_ids', 'attention_mask'] def __init__( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : int="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : str="<unk>" , __lowerCAmelCase : 
Optional[Any]="<pad>" , __lowerCAmelCase : List[Any]="<mask>" , __lowerCAmelCase : Dict=False , **__lowerCAmelCase : Union[str, Any] , ): _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else bos_token _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else eos_token _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else sep_token _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else cls_token _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else unk_token _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token super().__init__( errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , **__lowerCAmelCase , ) with open(__lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle: _UpperCAmelCase = json.load(__lowerCAmelCase ) _UpperCAmelCase = {v: k for k, v in self.encoder.items()} _UpperCAmelCase = errors # how to handle errors in decoding _UpperCAmelCase = bytes_to_unicode() _UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCAmelCase , encoding="""utf-8""" ) as merges_handle: _UpperCAmelCase = merges_handle.read().split("""\n""" )[1:-1] _UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] _UpperCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) _UpperCAmelCase = {} _UpperCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _UpperCAmelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def lowerCAmelCase_ ( self : List[Any] ): return len(self.encoder ) def lowerCAmelCase_ ( self : Optional[int] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Tuple ): if token in self.cache: return self.cache[token] _UpperCAmelCase = tuple(__lowerCAmelCase ) _UpperCAmelCase = get_pairs(__lowerCAmelCase ) if not pairs: return token while True: _UpperCAmelCase = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _UpperCAmelCase , _UpperCAmelCase = bigram _UpperCAmelCase = [] _UpperCAmelCase = 0 while i < len(__lowerCAmelCase ): try: _UpperCAmelCase = word.index(__lowerCAmelCase , __lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _UpperCAmelCase = j if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second: 
new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _UpperCAmelCase = tuple(__lowerCAmelCase ) _UpperCAmelCase = new_word if len(__lowerCAmelCase ) == 1: break else: _UpperCAmelCase = get_pairs(__lowerCAmelCase ) _UpperCAmelCase = """ """.join(__lowerCAmelCase ) _UpperCAmelCase = word return word def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict ): _UpperCAmelCase = [] for token in re.findall(self.pat , __lowerCAmelCase ): _UpperCAmelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCAmelCase ).split(""" """ ) ) return bpe_tokens def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Union[str, Any] ): return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Optional[Any] ): return self.decoder.get(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = """""".join(__lowerCAmelCase ) _UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): if not os.path.isdir(__lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _UpperCAmelCase = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + """\n""" ) _UpperCAmelCase = 0 with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) _UpperCAmelCase = token_index writer.write(""" """.join(__lowerCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] _UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] 
_UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=False , **__lowerCAmelCase : Dict ): _UpperCAmelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCAmelCase ) > 0 and not text[0].isspace()): _UpperCAmelCase = """ """ + text return (text, kwargs)
371
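A hedged end-to-end sketch of the byte-level BPE tokenizer above (downloads the checkpoint named in the vocab map; the token strings shown are what RoBERTa-style BPE typically produces):

from transformers import LongformerTokenizer

tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
ids = tok("Hello world")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # ['<s>', 'Hello', 'Ġworld', '</s>']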
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ): super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) if config is None: assert isinstance(self.model , __lowerCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) _UpperCAmelCase = self.model.config else: _UpperCAmelCase = config _UpperCAmelCase = data_args _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: _UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _UpperCAmelCase = label_smoothed_nll_loss def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if self.optimizer is None: _UpperCAmelCase = ["""bias""", """LayerNorm.weight"""] _UpperCAmelCase = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] _UpperCAmelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _UpperCAmelCase = Adafactor _UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False} else: _UpperCAmelCase = AdamW _UpperCAmelCase = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } _UpperCAmelCase = self.args.learning_rate if self.sharded_ddp: _UpperCAmelCase = OSS( params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , ) else: _UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase ) if self.lr_scheduler is None: _UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _UpperCAmelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _UpperCAmelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase ) return scheduler def lowerCAmelCase_ ( self : Optional[int] ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2] else: # compute label smoothed loss _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ): _UpperCAmelCase = inputs.pop("""labels""" ) _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return loss def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ): _UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase ) _UpperCAmelCase = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _UpperCAmelCase = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) _UpperCAmelCase = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ): # If PAD token is not defined at least EOS token has to be defined _UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) _UpperCAmelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _UpperCAmelCase = tensor return padded_tensor
30
0
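The label-smoothing branch above delegates to a label_smoothed_nll_loss helper that the excerpt imports but does not show. A standalone sketch under the usual fairseq-style definition (this helper body is an assumption, not the excerpt's code):

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probs; target: (N,) gold ids, ignore_index marks padding.
    nll = -lprobs.gather(dim=-1, index=target.clamp(min=0).unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)  # loss against a uniform prior over the vocabulary
    mask = target.ne(ignore_index)
    loss = ((1.0 - epsilon) * nll + epsilon * smooth)[mask].sum()
    return loss, nll[mask].sum()

lprobs = torch.log_softmax(torch.randn(4, 10), dim=-1)
loss, nll = label_smoothed_nll_loss(lprobs, torch.tensor([1, 2, -100, 3]), epsilon=0.1)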
"""simple docstring""" import warnings from .generation import TFGenerationMixin class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): # warning at import time warnings.warn( 'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ' 'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , UpperCAmelCase__ , )
109
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Tuple = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } UpperCAmelCase : Optional[Any] = { "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"}, "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"}, } UpperCAmelCase : Union[str, Any] = { "ctrl": 2_56, } UpperCAmelCase : List[str] = { "Pregnancy": 16_86_29, "Christianity": 76_75, "Explain": 10_64_23, "Fitness": 6_34_40, "Saving": 6_31_63, "Ask": 2_71_71, "Ass": 9_59_85, "Joke": 16_35_09, "Questions": 4_56_22, "Thoughts": 4_96_05, "Retail": 5_23_42, "Feminism": 16_43_38, "Writing": 1_19_92, "Atheism": 19_22_63, "Netflix": 4_86_16, "Computing": 3_96_39, "Opinion": 4_32_13, "Alone": 4_49_67, "Funny": 5_89_17, "Gaming": 4_03_58, "Human": 40_88, "India": 13_31, "Joker": 7_71_38, "Diet": 3_62_06, "Legal": 1_18_59, "Norman": 49_39, "Tip": 7_26_89, "Weight": 5_23_43, "Movies": 4_62_73, "Running": 2_34_25, "Science": 20_90, "Horror": 3_77_93, "Confession": 6_05_72, "Finance": 1_22_50, "Politics": 1_63_60, "Scary": 19_19_85, "Support": 1_26_54, "Technologies": 3_25_16, "Teenage": 6_61_60, "Event": 3_27_69, "Learned": 6_74_60, "Notion": 18_27_70, "Wikipedia": 3_75_83, "Books": 66_65, "Extract": 7_60_50, "Confessions": 10_27_01, "Conspiracy": 7_59_32, "Links": 6_36_74, "Narcissus": 15_04_25, "Relationship": 5_47_66, "Relationships": 13_47_96, "Reviews": 4_16_71, "News": 42_56, "Translation": 2_68_20, "multilingual": 12_84_06, } def __lowerCamelCase ( lowerCamelCase__ : Any ): '''simple docstring''' lowerCamelCase = set() lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase = char lowerCamelCase = set(lowerCamelCase__ ) return pairs class __lowercase ( a_ ): """simple docstring""" UpperCamelCase : Any = VOCAB_FILES_NAMES UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : Optional[int] = CONTROL_CODES def __init__( self , A , A , A="<unk>" , **A ) -> int: '''simple docstring''' super().__init__(unk_token=A , **A ) with open(A , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase = json.load(A ) lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(A , encoding="""utf-8""" ) as merges_handle: lowerCamelCase = merges_handle.read().split("""\n""" )[1:-1] lowerCamelCase = [tuple(merge.split() ) for merge in merges] lowerCamelCase = dict(zip(A , range(len(A ) ) ) ) lowerCamelCase = {} @property def __A ( self ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def __A ( self ) -> List[str]: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self , A ) -> str: '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase = tuple(A ) lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) lowerCamelCase = get_pairs(A ) if not pairs: return token while True: lowerCamelCase = min(A , key=lambda A : self.bpe_ranks.get(A , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase , lowerCamelCase = bigram lowerCamelCase = [] lowerCamelCase = 0 while i < len(A ): try: lowerCamelCase = word.index(A , A ) except ValueError: new_word.extend(word[i:] ) break else: 
new_word.extend(word[i:j] ) lowerCamelCase = j if word[i] == first and i < len(A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase = tuple(A ) lowerCamelCase = new_word if len(A ) == 1: break else: lowerCamelCase = get_pairs(A ) lowerCamelCase = """@@ """.join(A ) lowerCamelCase = word[:-4] lowerCamelCase = word return word def __A ( self , A ) -> Optional[int]: '''simple docstring''' lowerCamelCase = [] lowerCamelCase = re.findall(r"""\S+\n?""" , A ) for token in words: split_tokens.extend(list(self.bpe(A ).split(""" """ ) ) ) return split_tokens def __A ( self , A ) -> int: '''simple docstring''' return self.encoder.get(A , self.encoder.get(self.unk_token ) ) def __A ( self , A ) -> Any: '''simple docstring''' return self.decoder.get(A , self.unk_token ) def __A ( self , A ) -> str: '''simple docstring''' lowerCamelCase = """ """.join(A ).replace("""@@ """ , """""" ).strip() return out_string def __A ( self , A , A = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(A ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase = os.path.join( A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase = os.path.join( A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(A , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A ) + """\n""" ) lowerCamelCase = 0 with open(A , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) lowerCamelCase = token_index writer.write(""" """.join(A ) + """\n""" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
252
0
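CTRL steers generation by prepending a control code; the CONTROL_CODES table above maps each domain to a vocabulary id. A hedged usage sketch:

from transformers import CTRLTokenizer

tok = CTRLTokenizer.from_pretrained("ctrl")
print(tok.control_codes["Links"])            # 63674, matching the table above
ids = tok("Links Hello world")["input_ids"]  # the domain code conditions the model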
"""simple docstring""" import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCamelCase : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=30 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=0.6 , lowerCAmelCase__=None , ) -> Tuple: a : Optional[int] = parent a : str = batch_size a : str = image_size a : List[Any] = patch_size a : int = num_channels a : int = is_training a : Any = use_labels a : str = hidden_size a : List[Any] = num_hidden_layers a : List[Any] = num_attention_heads a : List[Any] = intermediate_size a : Dict = hidden_act a : List[Any] = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : Optional[Any] = type_sequence_label_size a : List[str] = initializer_range a : List[Any] = mask_ratio a : Optional[int] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) a : Tuple = (image_size // patch_size) ** 2 a : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __a ( self ) -> List[Any]: a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a : int = None if self.use_labels: a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a : Optional[Any] = self.get_config() return config, pixel_values, labels def __a ( self ) -> Dict: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: a : List[str] = ViTMAEModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() a : Any = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: a : Optional[int] = ViTMAEForPreTraining(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() a : Union[str, Any] = model(lowerCAmelCase__ ) a : Optional[Any] = (self.image_size // self.patch_size) ** 2 a : List[Any] = 
self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images a : Dict = 1 a : Dict = ViTMAEForPreTraining(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a : Dict = model(lowerCAmelCase__ ) a : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def __a ( self ) -> List[Any]: a : List[str] = self.prepare_config_and_inputs() a : str = config_and_inputs a : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( a__ , a__ , unittest.TestCase ): lowerCamelCase : str =(ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () lowerCamelCase : Optional[int] ={"""feature-extraction""": ViTMAEModel} if is_torch_available() else {} lowerCamelCase : Dict =False lowerCamelCase : Optional[int] =False lowerCamelCase : str =False lowerCamelCase : Tuple =False def __a ( self ) -> Optional[Any]: a : Union[str, Any] = ViTMAEModelTester(self ) a : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def __a ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def __a ( self ) -> str: pass def __a ( self ) -> Optional[int]: a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : Optional[Any] = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) ) def __a ( self ) -> List[Any]: a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : str = model_class(lowerCAmelCase__ ) a : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a : Optional[int] = [*signature.parameters.keys()] a : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __a ( self ) -> Dict: a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __a ( self ) -> Tuple: a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: # make masks reproducible np.random.seed(2 ) a : List[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) a : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) a : List[Any] = torch.from_numpy(lowerCAmelCase__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument a : int = pt_noise super().check_pt_tf_models(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Optional[int]: a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : List[Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): a : Any = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) a : Tuple = outputs[0].cpu().numpy() a : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) a : Optional[int] = model_class.from_pretrained(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): a : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Make sure we don't have nans a : Tuple = after_outputs[0].cpu().numpy() a : str = 0 a : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1E-5 ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __a ( self ) -> Optional[int]: pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __a ( self ) -> Optional[Any]: pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __a ( self ) -> Union[str, Any]: pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def __a ( self ) -> Any: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def __a ( self ) -> Union[str, Any]: pass @slow def __a ( self ) -> str: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : str = ViTMAEModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( ) ->Tuple: '''simple docstring''' a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __a ( self ) -> str: return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def __a ( self ) -> Union[str, Any]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) a : Dict = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(lowerCAmelCase__ ) a : List[str] = self.default_image_processor a : Union[str, Any] = prepare_img() a : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) a : List[str] = ViTMAEConfig() a : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) a : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): a : Dict = model(**lowerCAmelCase__ , noise=torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ) ) # verify the logits a : Optional[Any] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) a : Optional[Any] = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCAmelCase__ ) , atol=1E-4 ) )
354
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : List[str] = logging.get_logger(__name__) a : Optional[int] = '''T5Config''' def _SCREAMING_SNAKE_CASE ( _lowercase : jnp.array , _lowercase : int , _lowercase : int ) ->jnp.ndarray: '''simple docstring''' a : Tuple = jnp.zeros_like(_lowercase ) a : Tuple = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) a : Dict = shifted_input_ids.at[:, 0].set(_lowercase ) a : Optional[Any] = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase ) return shifted_input_ids class __UpperCamelCase ( a__ ): lowerCamelCase : Any ="""mt5""" lowerCamelCase : Dict =MTaConfig class __UpperCamelCase ( a__ ): lowerCamelCase : str ="""mt5""" lowerCamelCase : Tuple =MTaConfig class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] ="""mt5""" lowerCamelCase : Tuple =MTaConfig
79
0
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights lowercase__ = FlaxDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase , cache_dir=UpperCamelCase ) lowercase__ = [t[-1] for t in os.walk(os.path.join(UpperCamelCase , os.listdir(UpperCamelCase )[0] , '''snapshots''' ) )] lowercase__ = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('''.bin''' ) for f in files ) @slow @require_flax class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase ) lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 4 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(UpperCamelCase ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase ) lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase ) lowercase__ = shard(UpperCamelCase ) lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3 assert np.abs(np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1 lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(UpperCamelCase ) == num_samples def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=UpperCamelCase ) lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(UpperCamelCase ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase ) lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase ) lowercase__ = shard(UpperCamelCase ) lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 
3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3 assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase ) lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(UpperCamelCase ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase ) lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase ) lowercase__ = shard(UpperCamelCase ) lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa ) lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(UpperCamelCase ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase ) lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase ) lowercase__ = shard(UpperCamelCase ) lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3 assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1 def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = FlaxDDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=UpperCamelCase , steps_offset=1 , ) lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , ) lowercase__ = scheduler.create_state() lowercase__ = scheduler_state lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = pipeline.prepare_inputs(UpperCamelCase ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase ) lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase ) lowercase__ = shard(UpperCamelCase ) 
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3 assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = jax.random.split(jax.random.PRNGKey(0 ) , UpperCamelCase ) lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase , ) lowercase__ = replicate(UpperCamelCase ) lowercase__ = pipeline.prepare_inputs(UpperCamelCase ) lowercase__ = shard(UpperCamelCase ) lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) lowercase__ = images[2, 0, 256, 10:17, 1] # With memory efficient attention lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase , use_memory_efficient_attention=UpperCamelCase , ) lowercase__ = replicate(UpperCamelCase ) lowercase__ = pipeline.prepare_inputs(UpperCamelCase ) lowercase__ = shard(UpperCamelCase ) lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) lowercase__ = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
2
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C 
TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
1
import random


def rabin_miller( num : int ) -> bool:
    # Miller-Rabin probabilistic primality test with 5 random witnesses.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num( num : int ) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
        1_01, 1_03, 1_07, 1_09, 1_13, 1_27, 1_31, 1_37, 1_39, 1_49, 1_51, 1_57, 1_63, 1_67, 1_73, 1_79,
        1_81, 1_91, 1_93, 1_97, 1_99, 2_11, 2_23, 2_27, 2_29, 2_33, 2_39, 2_41, 2_51, 2_57, 2_63, 2_69,
        2_71, 2_77, 2_81, 2_83, 2_93, 3_07, 3_11, 3_13, 3_17, 3_31, 3_37, 3_47, 3_49, 3_53, 3_59, 3_67,
        3_73, 3_79, 3_83, 3_89, 3_97, 4_01, 4_09, 4_19, 4_21, 4_31, 4_33, 4_39, 4_43, 4_49, 4_57, 4_61,
        4_63, 4_67, 4_79, 4_87, 4_91, 4_99, 5_03, 5_09, 5_21, 5_23, 5_41, 5_47, 5_57, 5_63, 5_69, 5_71,
        5_77, 5_87, 5_93, 5_99, 6_01, 6_07, 6_13, 6_17, 6_19, 6_31, 6_41, 6_43, 6_47, 6_53, 6_59, 6_61,
        6_73, 6_77, 6_83, 6_91, 7_01, 7_09, 7_19, 7_27, 7_33, 7_39, 7_43, 7_51, 7_57, 7_61, 7_69, 7_73,
        7_87, 7_97, 8_09, 8_11, 8_21, 8_23, 8_27, 8_29, 8_39, 8_53, 8_57, 8_59, 8_63, 8_77, 8_81, 8_83,
        8_87, 9_07, 9_11, 9_19, 9_29, 9_37, 9_41, 9_47, 9_53, 9_67, 9_71, 9_77, 9_83, 9_91, 9_97,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num )


def generate_large_prime( keysize : int = 10_24 ) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("""Prime number:""", num))
    print(("""is_prime_low_num:""", is_prime_low_num(num)))
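A short usage sketch for the helpers above (the concrete numbers and the smaller keysize are illustrative only):

assert is_prime_low_num(97)
assert not is_prime_low_num(91)  # 91 = 7 * 13
p = generate_large_prime(keysize=128)  # a small keysize keeps the demo fast
assert p.bit_length() == 128 and is_prime_low_num(p)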
359
import math


def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution( nth : int = 1_00_01 ) -> int:
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]


if __name__ == "__main__":
    print(f'''{solution() = }''')
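A quick illustrative check of solution() above, with a value small enough to verify by hand:

assert solution(6) == 13  # primes: 2, 3, 5, 7, 11, 13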
102
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]: for attribute in key.split('''.''' ): A: str = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if weight_type is not None: A: Any = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape else: A: List[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": A: List[Any] = value elif weight_type == "weight_g": A: Dict = value elif weight_type == "weight_v": A: Any = value elif weight_type == "bias": A: Optional[Any] = value else: A: Any = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: A: Optional[int] = [] A: str = fairseq_model.state_dict() A: Tuple = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): A: List[Any] = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == '''group''' , ) A: Optional[int] = True else: for key, mapped_key in MAPPING.items(): A: Any = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): A: List[Any] = True if "*" in mapped_key: A: List[str] = name.split(SCREAMING_SNAKE_CASE__ )[0].split('''.''' )[-2] A: List[str] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: A: str = '''weight_g''' elif "weight_v" in name: A: Tuple = '''weight_v''' elif "weight" in name: A: List[Any] = '''weight''' elif "bias" in name: A: Tuple = '''bias''' else: A: Any = None set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]: A: List[str] = full_name.split('''conv_layers.''' )[-1] A: Tuple = name.split('''.''' ) A: Any = int(items[0] ) A: Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) A: Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) A: Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) A: Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) A: Tuple = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=True ) -> str: if config_path is not None: A: Union[str, Any] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: A: Union[str, Any] = HubertConfig() if is_finetuned: if dict_path: A: Union[str, Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq A: List[Any] = target_dict.pad_index A: Optional[Any] = target_dict.bos_index A: Optional[int] = target_dict.eos_index A: Union[str, Any] = len(target_dict.symbols ) A: Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE__ ) A: Optional[Any] = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE__ , ) A: Any = True if config.feat_extract_norm == '''layer''' else False A: List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) A: Any = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) A: str = HubertForCTC(SCREAMING_SNAKE_CASE__ ) else: A: Union[str, Any] = HubertModel(SCREAMING_SNAKE_CASE__ ) if is_finetuned: A: Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: A: Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) A: Dict = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase = 
parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
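A hypothetical command line for the conversion script above (the script filename and all paths are placeholders; the flags match the argparse definitions in the file):

# python convert_hubert_checkpoint.py \
#     --checkpoint_path /path/to/fairseq/hubert_base.pt \
#     --pytorch_dump_folder_path ./hubert-converted \
#     --not_finetuned
# For a fine-tuned CTC checkpoint, omit --not_finetuned and pass --dict_path
# pointing at the fairseq dictionary used during fine-tuning.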
319
import sys


def matrix_chain_order( array ):
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution( optimal_solution , i , j ):
    if i == j:
        print('''A''' + str(i ) , end=''' ''' )
    else:
        print('''(''' , end=''' ''' )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(''')''' , end=''' ''' )


def main( ):
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )
    print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
    print_optiomal_solution(sol , 1 , n - 1 )


if __name__ == "__main__":
    main()
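The dimensions wired into main() above are the classic CLRS matrix-chain example; a hedged sanity check (the expected count comes from the textbook, not from this source):

matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125  # minimal number of scalar multiplications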
259
0
import math


def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution( nth : int = 1_0001 ) -> int:
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]


if __name__ == "__main__":
    print(F"{solution() = }")
355
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _lowerCamelCase = logging.get_logger(__name__) class a ( _A ): '''simple docstring''' lowerCAmelCase : str = ['input_values', 'padding_mask'] def __init__( self : Optional[Any] , __snake_case : int = 1 , __snake_case : int = 2_40_00 , __snake_case : float = 0.0 , __snake_case : float = None , __snake_case : float = None , **__snake_case : Dict , ): super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case ) UpperCAmelCase_ = chunk_length_s UpperCAmelCase_ = overlap @property def lowerCamelCase_ ( self : List[str] ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def lowerCamelCase_ ( self : List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self : List[str] , __snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __snake_case : Optional[Union[bool, str, PaddingStrategy]] = None , __snake_case : Optional[bool] = False , __snake_case : Optional[int] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[int] = None , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if padding and truncation: raise ValueError('''Both padding and truncation were set. 
Make sure you only set one.''' ) elif padding is None: # by default let's pad the inputs UpperCAmelCase_ = True UpperCAmelCase_ = bool( isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase_ = [np.asarray(__snake_case , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__snake_case , np.ndarray ): UpperCAmelCase_ = np.asarray(__snake_case , dtype=np.floataa ) elif isinstance(__snake_case , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): UpperCAmelCase_ = raw_audio.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase_ = [np.asarray(__snake_case ).T] # verify inputs are valid for idx, example in enumerate(__snake_case ): if example.ndim > 2: raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' ) UpperCAmelCase_ = None UpperCAmelCase_ = BatchFeature({'''input_values''': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: UpperCAmelCase_ = min(array.shape[0] for array in raw_audio ) UpperCAmelCase_ = int(np.floor(max_length / self.chunk_stride ) ) UpperCAmelCase_ = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: UpperCAmelCase_ = max(array.shape[0] for array in raw_audio ) UpperCAmelCase_ = int(np.ceil(max_length / self.chunk_stride ) ) UpperCAmelCase_ = (nb_step - 1) * self.chunk_stride + self.chunk_length UpperCAmelCase_ = '''max_length''' else: UpperCAmelCase_ = input_values # normal padding on batch if padded_inputs is None: UpperCAmelCase_ = self.pad( __snake_case , max_length=__snake_case , truncation=__snake_case , padding=__snake_case , return_attention_mask=__snake_case , ) if padding: UpperCAmelCase_ = padded_inputs.pop('''attention_mask''' ) UpperCAmelCase_ = [] for example in padded_inputs.pop('''input_values''' ): if self.feature_size == 1: UpperCAmelCase_ = example[..., None] input_values.append(example.T ) UpperCAmelCase_ = input_values if return_tensors is not None: UpperCAmelCase_ = padded_inputs.convert_to_tensors(__snake_case ) return padded_inputs
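A minimal usage sketch for a feature extractor of this kind; since the class name above is obfuscated, this uses the equivalent upstream transformers class, which is an assumption about what this file mirrors:

import numpy as np
from transformers import EncodecFeatureExtractor  # assumed upstream equivalent of the class above

extractor = EncodecFeatureExtractor()  # defaults to mono audio at 24 kHz
audio = np.zeros(24_000, dtype=np.float32)  # one second of synthetic silence
inputs = extractor(audio, sampling_rate=24_000, return_tensors="np")
print(inputs["input_values"].shape)  # (batch=1, channels=1, samples)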
177
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
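A sketch of what the lazy-module pattern above buys (the package path matches the real transformers layout; behavior described is the usual _LazyModule contract): importing the package is cheap, and the heavy tokenizer module only loads on first attribute access.

import transformers.models.wav2vec2_phoneme as pkg  # no tokenizer code runs yet
tok_cls = pkg.Wav2Vec2PhonemeCTCTokenizer  # first access triggers the real import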
312
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset( ):
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data )
    return dataset


class _a ( TestCase ):
    """simple docstring"""

    def test_make_duplicate_clusters( self ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset( self ):
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True )
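A hypothetical end-to-end use of the module exercised above (minhash_deduplication is the local script under test; the 0.85 threshold mirrors the first test case):

ds = get_dataset()
deduped, clusters = deduplicate_dataset(ds)
print(f"{len(ds)} -> {len(deduped)} rows after near-duplicate removal")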
312
1
'''simple docstring'''


def __UpperCamelCase ( hex_num ):
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function" )
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("Invalid value was passed to the function" )
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
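Two worked examples for the converter above (values computed by hand; note the result is the binary digits packed into a Python int):

assert __UpperCamelCase("AC") == 10101100   # 0xAC = 172 = 0b10101100
assert __UpperCamelCase("-10") == -10000    # -0x10 = -16 = -0b10000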
369
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__) @add_end_docstrings(snake_case__ ) class SCREAMING_SNAKE_CASE__ ( snake_case__ ): """simple docstring""" def __init__( self : List[Any] , **UpperCAmelCase_ : Dict ): """simple docstring""" super().__init__(**UpperCAmelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : Tuple ): """simple docstring""" return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ): """simple docstring""" __UpperCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: __UpperCAmelCase : Union[str, Any] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: __UpperCAmelCase : int = kwargs["hypothesis_template"] return preprocess_params, {}, {} def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}." ): """simple docstring""" __UpperCAmelCase : Tuple = load_image(UpperCAmelCase_ ) __UpperCAmelCase : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework ) __UpperCAmelCase : Dict = candidate_labels __UpperCAmelCase : Any = [hypothesis_template.format(UpperCAmelCase_ ) for x in candidate_labels] __UpperCAmelCase : Optional[int] = self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_ ) __UpperCAmelCase : List[Any] = [text_inputs] return inputs def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : Tuple ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = model_inputs.pop("candidate_labels" ) __UpperCAmelCase : str = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , UpperCAmelCase_ ): __UpperCAmelCase : Tuple = text_inputs[0] else: # Batching case. 
__UpperCAmelCase : Optional[int] = text_inputs[0][0] __UpperCAmelCase : Any = self.model(**UpperCAmelCase_ , **UpperCAmelCase_ ) __UpperCAmelCase : Dict = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : Dict ): """simple docstring""" __UpperCAmelCase : Any = model_outputs.pop("candidate_labels" ) __UpperCAmelCase : Tuple = model_outputs["logits"][0] if self.framework == "pt": __UpperCAmelCase : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 ) __UpperCAmelCase : Dict = probs.tolist() if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __UpperCAmelCase : List[Any] = [scores] elif self.framework == "tf": __UpperCAmelCase : Union[str, Any] = stable_softmax(UpperCAmelCase_ , axis=-1 ) __UpperCAmelCase : List[str] = probs.numpy().tolist() else: raise ValueError(f"Unsupported framework: {self.framework}" ) __UpperCAmelCase : Dict = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_ ) , key=lambda UpperCAmelCase_ : -x[0] ) ] return result
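A sketch of how a zero-shot image-classification pipeline like the one above is typically invoked (the checkpoint id, image path, and labels are illustrative):

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "photo.jpg",  # placeholder image path
    candidate_labels=["a photo of a cat", "a photo of a dog"],
)
print(result)  # [{"score": ..., "label": ...}, ...] sorted by descending score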
37
0
import torch

from diffusers import StableDiffusionPipeline


model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')

prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('''dog-bucket.png''')
282
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ): '''simple docstring''' _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_input_mask _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_labels _snake_case = scope _snake_case = range_bbox def A ( self : List[Any] ): '''simple docstring''' _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _snake_case = bbox[i, j, 3] _snake_case = bbox[i, j, 1] _snake_case = t if bbox[i, j, 2] < bbox[i, j, 0]: _snake_case = bbox[i, j, 2] _snake_case = bbox[i, j, 0] _snake_case = t _snake_case = None if self.use_input_mask: _snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _snake_case = None if self.use_token_type_ids: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def A ( self : List[str] ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range 
, ) def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ): '''simple docstring''' _snake_case = LiltModel(config=lowercase ) model.to(lowercase ) model.eval() _snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase ) _snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase ) _snake_case = model(lowercase , bbox=lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ): '''simple docstring''' _snake_case = self.num_labels _snake_case = LiltForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() _snake_case = model( lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ): '''simple docstring''' _snake_case = LiltForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() _snake_case = model( lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ): '''simple docstring''' _UpperCAmelCase : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase : List[str] = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Union[str, Any] = False def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ): '''simple docstring''' return True def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = LiltModelTester(self ) _snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def A ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def A ( self : Dict ): '''simple docstring''' _snake_case = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowercase ) def A ( self : List[Any] ): '''simple docstring''' _snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _snake_case = type self.model_tester.create_and_check_model(*lowercase ) def A ( self : Any ): '''simple docstring''' _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase ) def A ( self : Any ): '''simple docstring''' _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase ) @slow def A ( self : Union[str, Any] ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = LiltModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def A ( self : Tuple ): '''simple docstring''' _snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase ) _snake_case = torch.tensor([[1, 2]] , device=lowercase ) _snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase ) # forward pass with torch.no_grad(): _snake_case = model(input_ids=lowercase , bbox=lowercase ) _snake_case = torch.Size([1, 2, 768] ) _snake_case = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) )
282
1
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCamelCase_ = '''\ Text data. Second line of data.''' lowerCamelCase_ = '''file''' @pytest.fixture(scope="""session""" ) def __magic_name__ ( __a : Dict ): '''simple docstring''' UpperCamelCase__ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") UpperCamelCase__ = bytes(__a , """utf-8""" ) with zstd.open(__a , """wb""" ) as f: f.write(__a ) return path @pytest.fixture def __magic_name__ ( __a : List[Any] ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , __a ) , """w""" ) as f: f.write(__a ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def __magic_name__ ( __a : List[str] , __a : Tuple , __a : Union[str, Any] , __a : List[str] , __a : List[Any] , __a : Dict ): '''simple docstring''' UpperCamelCase__ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} UpperCamelCase__ = input_paths[compression_format] UpperCamelCase__ = tmp_path / """cache""" UpperCamelCase__ = DownloadConfig(cache_dir=__a , extract_compressed_file=__a ) UpperCamelCase__ = cached_path(__a , download_config=__a ) with open(__a ) as f: UpperCamelCase__ = f.read() with open(__a ) as f: UpperCamelCase__ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def __magic_name__ ( __a : List[str] , __a : Dict , __a : int , __a : Optional[Any] , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = """custom_cache""" UpperCamelCase__ = """custom_extracted_dir""" UpperCamelCase__ = tmp_path / """custom_extracted_path""" if default_extracted: UpperCamelCase__ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , __a ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(__a ) ) UpperCamelCase__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) UpperCamelCase__ = xz_file UpperCamelCase__ = ( DownloadConfig(extract_compressed_file=__a ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__a ) ) UpperCamelCase__ = cached_path(__a , download_config=__a ) assert Path(__a ).parent.parts[-2:] == expected def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = str(Path(__a ).resolve() ) assert cached_path(__a ) == text_file # relative path UpperCamelCase__ = str(Path(__a ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(__a ) == text_file def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = str(tmp_path.resolve() / """__missing_file__.txt""" ) with pytest.raises(__a ): cached_path(__a ) # relative path UpperCamelCase__ = """./__missing_file__.txt""" with pytest.raises(__a ): cached_path(__a ) def __magic_name__ ( __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = get_from_cache(f"tmp://{tmpfs_file}" ) with open(__a ) as f: UpperCamelCase__ = f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , 
__a ) def __magic_name__ ( ): '''simple docstring''' with pytest.raises(__a ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __a ) def __magic_name__ ( __a : int ): '''simple docstring''' UpperCamelCase__ = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__a ): http_get("""https://huggingface.co""" , temp_file=__a ) with pytest.raises(__a ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __a ) def __magic_name__ ( __a : int ): '''simple docstring''' UpperCamelCase__ = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__a ): ftp_get("""ftp://huggingface.co""" , temp_file=__a ) with pytest.raises(__a ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __a ) def __magic_name__ ( __a : List[str] ): '''simple docstring''' UpperCamelCase__ = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__a ): fsspec_get("""s3://huggingface.co""" , temp_file=__a ) with pytest.raises(__a ): fsspec_head("""s3://huggingface.co""" )
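The tests above exercise datasets' cached_path (download, cache reuse, optional extraction) plus the offline-mode guards; the happy path in a small sketch (the URL is hypothetical):

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

# Download (or reuse the cached copy) and transparently extract the archive.
download_config = DownloadConfig(cache_dir="./hf_cache", extract_compressed_file=True)
local_path = cached_path("https://example.com/data.txt.gz", download_config=download_config)  # hypothetical URL

with open(local_path) as f:
    content = f.read()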
178
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def __magic_name__ ( __a : dict ): '''simple docstring''' return (data["data"], data["target"]) def __magic_name__ ( __a : np.ndarray , __a : np.ndarray , __a : np.ndarray ): '''simple docstring''' UpperCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(__a , __a ) # Predict target for test data UpperCamelCase__ = xgb.predict(__a ) UpperCamelCase__ = predictions.reshape(len(__a ) , 1 ) return predictions def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = fetch_california_housing() UpperCamelCase__ , UpperCamelCase__ = data_handling(__a ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = train_test_split( __a , __a , test_size=0.25 , random_state=1 ) UpperCamelCase__ = xgboost(__a , __a , __a ) # Error printing print(f"Mean Absolute Error : {mean_absolute_error(__a , __a )}" ) print(f"Mean Square Error : {mean_squared_error(__a , __a )}" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
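The style transform renames every function in this sample to __magic_name__, so the trailing main() call no longer resolves; a readable sketch of the same pipeline (variable names are illustrative):

from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

housing = fetch_california_housing()
x_train, x_test, y_train, y_test = train_test_split(
    housing.data, housing.target, test_size=0.25, random_state=1
)
model = XGBRegressor(verbosity=0, random_state=42)
model.fit(x_train, y_train)
predictions = model.predict(x_test)
print("Mean Absolute Error:", mean_absolute_error(y_test, predictions))
print("Mean Square Error:", mean_squared_error(y_test, predictions))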
178
1
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __A ( _a, unittest.TestCase ): """simple docstring""" __lowerCAmelCase = TransfoXLTokenizer __lowerCAmelCase = False __lowerCAmelCase = False def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: super().setUp() a =[ """<unk>""", """[CLS]""", """[SEP]""", """want""", """unwanted""", """wa""", """un""", """running""", """,""", """low""", """l""", ] a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE ( self , **__A ) -> str: a =True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def SCREAMING_SNAKE_CASE ( self , __A ) -> List[str]: a ="""<unk> UNwanted , running""" a ="""<unk> unwanted, running""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self ) -> Dict: a =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCamelCase ) a =tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(__lowerCamelCase , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [0, 4, 8, 7] ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: a =TransfoXLTokenizer(lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: a =TransfoXLTokenizer(lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def SCREAMING_SNAKE_CASE ( self ) -> str: a =TransfoXLTokenizer(lower_case=__lowerCamelCase ) a ="""Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?""" a =[ """Hello""", """(""", """bracket""", """)""", """and""", """side""", """@-@""", """scrolled""", """[""", """and""", """]""", """Henry""", """'s""", """$""", """5""", """@,@""", """000""", """with""", """3""", """@.@""", """34""", """m""", """.""", """What""", """'s""", """up""", """!""", """?""", ] self.assertListEqual(tokenizer.tokenize(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCamelCase ) , __lowerCamelCase ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =self.get_tokenizer() a =len(__lowerCamelCase ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(__lowerCamelCase ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
81
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): snake_case__ : Any = StableDiffusionXLImgaImgPipeline snake_case__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} snake_case__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""} snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case__ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS def _A ( self : int ): torch.manual_seed(0 ) UpperCamelCase :Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__lowerCamelCase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) UpperCamelCase :Tuple = EulerDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) UpperCamelCase :Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCamelCase :Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , ) UpperCamelCase :Any = CLIPTextModel(__lowerCamelCase ) UpperCamelCase :List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase ) UpperCamelCase :List[Any] = CLIPTextModelWithProjection(__lowerCamelCase ) UpperCamelCase :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def _A ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=0 ): UpperCamelCase :Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) UpperCamelCase :List[str] = image / 2 + 0.5 if str(__lowerCamelCase ).startswith("""mps""" ): UpperCamelCase :Any = 
torch.manual_seed(__lowerCamelCase ) else: UpperCamelCase :List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) UpperCamelCase :str = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def _A ( self : str ): UpperCamelCase :List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCamelCase :Optional[Any] = self.get_dummy_components() UpperCamelCase :List[Any] = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase ) UpperCamelCase :Any = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowerCamelCase ) UpperCamelCase :Union[str, Any] = sd_pipe(**__lowerCamelCase ).images UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase :List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A ( self : Dict ): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def _A ( self : Optional[Any] ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _A ( self : Union[str, Any] ): pass def _A ( self : Optional[int] ): UpperCamelCase :Union[str, Any] = self.get_dummy_components() UpperCamelCase :Dict = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase ) UpperCamelCase :List[Any] = sd_pipe.to(__lowerCamelCase ) UpperCamelCase :List[str] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) # forward without prompt embeds UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowerCamelCase ) UpperCamelCase :int = 3 * ["""this is a negative prompt"""] UpperCamelCase :Union[str, Any] = negative_prompt UpperCamelCase :Union[str, Any] = 3 * [inputs["""prompt"""]] UpperCamelCase :Dict = sd_pipe(**__lowerCamelCase ) UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1] # forward with prompt embeds UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase ) UpperCamelCase :Optional[int] = 3 * ["""this is a negative prompt"""] UpperCamelCase :Union[str, Any] = 3 * [inputs.pop("""prompt""" )] ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Union[str, Any] = sd_pipe.encode_prompt(__lowerCamelCase , negative_prompt=__lowerCamelCase ) UpperCamelCase :Dict = sd_pipe( **__lowerCamelCase , prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , pooled_prompt_embeds=__lowerCamelCase , negative_pooled_prompt_embeds=__lowerCamelCase , ) UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _A ( self : Tuple ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict="cpu" , __lowerCamelCase : List[Any]=torch.floataa , __lowerCamelCase : Tuple=0 ): UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) UpperCamelCase :Optional[Any] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) ) UpperCamelCase :Dict = 
torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase ) UpperCamelCase :str = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _A ( self : Optional[Any] ): UpperCamelCase :Any = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) UpperCamelCase :Optional[Any] = self.get_inputs(__lowerCamelCase ) UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase ).images UpperCamelCase :Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) UpperCamelCase :Union[str, Any] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
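StableDiffusionXLImgaImgPipeline is the transform's rendering of diffusers' StableDiffusionXLImg2ImgPipeline (digits in identifiers become letters, as with UNetaDConditionModel); a hedged usage sketch with an assumed checkpoint and a hypothetical image URL:

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16  # checkpoint assumed
).to("cuda")

init_image = load_image("https://example.com/input.png")  # hypothetical URL
image = pipe(
    prompt="A painting of a squirrel eating a burger",  # prompt from the sample's dummy inputs
    image=init_image,
    strength=0.75,
    guidance_scale=5.0,
).images[0]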
38
0
'''simple docstring''' from __future__ import annotations from typing import Generic, TypeVar a__ : str = TypeVar('T') class UpperCAmelCase__ ( Generic[T]): def __init__( self , lowercase ) -> None: __UpperCamelCase = data __UpperCamelCase = self __UpperCamelCase = 0 class UpperCAmelCase__ ( Generic[T]): def __init__( self ) -> None: # map from node name to the node object __UpperCamelCase = {} def __lowerCamelCase ( self , lowercase ) -> None: # create a new set with x as its member __UpperCamelCase = DisjointSetTreeNode(lowercase ) def __lowerCamelCase ( self , lowercase ) -> DisjointSetTreeNode[T]: # find the set x belongs to (with path-compression) __UpperCamelCase = self.map[data] if elem_ref != elem_ref.parent: __UpperCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __lowerCamelCase ( self , lowercase , lowercase ) -> None: # helper function for union operation if nodea.rank > nodea.rank: __UpperCamelCase = nodea else: __UpperCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __lowerCamelCase ( self , lowercase , lowercase ) -> None: # merge 2 disjoint sets self.link(self.find_set(lowercase ) , self.find_set(lowercase ) ) class UpperCAmelCase__ ( Generic[T]): def __init__( self ) -> None: # connections: map from the node to the neighbouring nodes (with weights) __UpperCamelCase = {} def __lowerCamelCase ( self , lowercase ) -> None: # add a node ONLY if its not present in the graph if node not in self.connections: __UpperCamelCase = {} def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> None: # add an edge with the given weight self.add_node(lowercase ) self.add_node(lowercase ) __UpperCamelCase = weight __UpperCamelCase = weight def __lowerCamelCase ( self ) -> GraphUndirectedWeighted[T]: __UpperCamelCase = [] __UpperCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda lowercase : x[2] ) # creating the disjoint set __UpperCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(lowercase ) # MST generation __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = edges[index] index += 1 __UpperCamelCase = disjoint_set.find_set(lowercase ) __UpperCamelCase = disjoint_set.find_set(lowercase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(lowercase , lowercase , lowercase ) disjoint_set.union(lowercase , lowercase ) return graph
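The mangled classes above implement a disjoint-set forest (path compression plus union by rank) and use it for Kruskal's minimum spanning tree; the same idea in a compact, readable form (an illustrative sketch, not the sample's exact implementation):

def kruskal(num_nodes, edges):
    # edges: iterable of (u, v, weight); returns the total MST weight
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps trees shallow
            x = parent[x]
        return x

    total = 0
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # cheapest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:            # skip edges that would close a cycle
            parent[root_u] = root_v
            total += w
    return total

print(kruskal(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 10)]))  # 6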
243
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCamelCase ( self ) -> Any: torch.manual_seed(0 ) __UpperCamelCase = UNetaDModel( sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def __lowerCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , ) return model @property def __lowerCamelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) __UpperCamelCase = UNetaDModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, unet @slow def __lowerCamelCase ( self ) -> Any: __UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) __UpperCamelCase = DDPMScheduler() __UpperCamelCase = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase ) __UpperCamelCase = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 ) __UpperCamelCase = pipe(generator=lowercase , steps=4 ) __UpperCamelCase = output.audios[0] __UpperCamelCase = output.images[0] __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 ) __UpperCamelCase = pipe(generator=lowercase , steps=4 , return_dict=lowercase ) __UpperCamelCase = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0] __UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 __UpperCamelCase = Mel( 
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) __UpperCamelCase = DDIMScheduler() __UpperCamelCase = self.dummy_vqvae_and_unet __UpperCamelCase = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase ) __UpperCamelCase = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) np.random.seed(0 ) __UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 ) __UpperCamelCase = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=1_0 ) __UpperCamelCase = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 __UpperCamelCase = self.dummy_unet_condition __UpperCamelCase = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase ) __UpperCamelCase = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) np.random.seed(0 ) __UpperCamelCase = torch.rand((1, 1, 1_0) ) __UpperCamelCase = pipe(generator=lowercase , encoding=lowercase ) __UpperCamelCase = output.images[0] __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class UpperCAmelCase__ ( unittest.TestCase): def __lowerCamelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ) -> str: __UpperCamelCase = torch_device __UpperCamelCase = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) __UpperCamelCase = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) __UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 ) __UpperCamelCase = pipe(generator=lowercase ) __UpperCamelCase = output.audios[0] __UpperCamelCase = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] __UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
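A readable sketch of the slow test at the end of the sample: load the published checkpoint and generate one audio/spectrogram pair (the checkpoint id comes from the sample itself):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
generator = torch.Generator().manual_seed(42)
output = pipe(generator=generator)
audio, image = output.audios[0], output.images[0]  # waveform plus its mel-spectrogram image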
243
1
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> list[list[int]]: '''simple docstring''' SCREAMING_SNAKE_CASE = [] if len(_SCREAMING_SNAKE_CASE ) == 1: return [nums.copy()] for _ in range(len(_SCREAMING_SNAKE_CASE ) ): SCREAMING_SNAKE_CASE = nums.pop(0 ) SCREAMING_SNAKE_CASE = permute(_SCREAMING_SNAKE_CASE ) for perm in permutations: perm.append(_SCREAMING_SNAKE_CASE ) result.extend(_SCREAMING_SNAKE_CASE ) nums.append(_SCREAMING_SNAKE_CASE ) return result def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' def backtrack(_SCREAMING_SNAKE_CASE ): if start == len(_SCREAMING_SNAKE_CASE ) - 1: output.append(nums[:] ) else: for i in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = nums[i], nums[start] backtrack(start + 1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = nums[i], nums[start] # backtrack SCREAMING_SNAKE_CASE = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function SCREAMING_SNAKE_CASE_ = permutea([1, 2, 3]) print(res) doctest.testmod()
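Both mangled functions above enumerate permutations, the second via in-place swap backtracking; note the transform also left the driver's permutea call pointing at a name that no longer exists. A readable version of the backtracking variant, checked against the standard library:

from itertools import permutations

def permute(nums):
    # In-place swap backtracking, as in the second function above.
    result = []

    def backtrack(start):
        if start == len(nums) - 1:
            result.append(nums[:])
            return
        for i in range(start, len(nums)):
            nums[start], nums[i] = nums[i], nums[start]  # choose
            backtrack(start + 1)
            nums[start], nums[i] = nums[i], nums[start]  # undo

    backtrack(0)
    return result

assert sorted(permute([1, 2, 3])) == sorted(map(list, permutations([1, 2, 3])))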
296
from argparse import ArgumentParser from .env import EnvironmentCommand def __lowercase ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" ) SCREAMING_SNAKE_CASE = parser.add_subparsers(help="""diffusers-cli command helpers""" ) # Register commands EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE ) # Let's go SCREAMING_SNAKE_CASE = parser.parse_args() if not hasattr(_SCREAMING_SNAKE_CASE , """func""" ): parser.print_help() exit(1 ) # Run SCREAMING_SNAKE_CASE = args.func(_SCREAMING_SNAKE_CASE ) service.run() if __name__ == "__main__": main()
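The CLI above registers a single subcommand on an argparse sub-parser and dispatches through args.func; the same pattern in miniature (names are illustrative):

import argparse

parser = argparse.ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="demo-cli command helpers")

env = subparsers.add_parser("env")
env.set_defaults(func=lambda args: print("environment info here"))

args = parser.parse_args(["env"])
args.func(args)  # dispatch to the chosen subcommand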
296
1
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING __A : Dict = logging.get_logger(__name__) @add_end_docstrings(_A ) class __UpperCamelCase ( _A ): def __init__(self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : int): super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) requires_backends(self , "vision") self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING) def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int=None): A = {} A = {} if prompt is not None: A = prompt if generate_kwargs is not None: A = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: A = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one") A = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__(self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any]): return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE__ (self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str=None): A = load_image(__SCREAMING_SNAKE_CASE) if prompt is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): raise ValueError( F"""Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE)} - but expected a single string. 
""" "Note also that one single text can be provided for conditional image to text generation.") A = self.model.config.model_type if model_type == "git": A = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework) A = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE).input_ids A = [self.tokenizer.cls_token_id] + input_ids A = torch.tensor(__SCREAMING_SNAKE_CASE).unsqueeze(0) model_inputs.update({"input_ids": input_ids}) elif model_type == "pix2struct": A = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation A = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework) A = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework) model_inputs.update(__SCREAMING_SNAKE_CASE) else: raise ValueError(F"""Model type {model_type} does not support conditional text generation""") else: A = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework) if self.model.config.model_type == "git" and prompt is None: A = None return model_inputs def SCREAMING_SNAKE_CASE__ (self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str=None): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"] , __SCREAMING_SNAKE_CASE) and all(x is None for x in model_inputs["input_ids"]) ): A = None if generate_kwargs is None: A = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. A = model_inputs.pop(self.model.main_input_name) A = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) return model_outputs def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str): A = [] for output_ids in model_outputs: A = { "generated_text": self.tokenizer.decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , ) } records.append(__SCREAMING_SNAKE_CASE) return records
362
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class __UpperCamelCase ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self : Dict): A = tempfile.mkdtemp() A = BlipImageProcessor() A = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model") A = BlipaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) processor.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE__ (self : Dict , **__SCREAMING_SNAKE_CASE : Any): return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE).tokenizer def SCREAMING_SNAKE_CASE__ (self : Tuple , **__SCREAMING_SNAKE_CASE : int): return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE).image_processor def SCREAMING_SNAKE_CASE__ (self : Optional[int]): shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE__ (self : Any): A = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)] A = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE__ (self : Any): A = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) A = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") A = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0) A = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE__ (self : Optional[Any]): A = self.get_image_processor() A = self.get_tokenizer() A = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) A = self.prepare_image_inputs() A = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="np") A = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2) def SCREAMING_SNAKE_CASE__ (self : Tuple): A = self.get_image_processor() A = self.get_tokenizer() A = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) A = "lower newer" A = processor(text=__SCREAMING_SNAKE_CASE) A = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def SCREAMING_SNAKE_CASE__ (self : Optional[int]): A = self.get_image_processor() A = self.get_tokenizer() A = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) A = "lower newer" A = self.prepare_image_inputs() A = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE) self.assertListEqual(list(inputs.keys()) , ["pixel_values", "input_ids", "attention_mask"]) # test if it raises when no input 
is passed with pytest.raises(__SCREAMING_SNAKE_CASE): processor() def SCREAMING_SNAKE_CASE__ (self : List[Any]): A = self.get_image_processor() A = self.get_tokenizer() A = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A = processor.batch_decode(__SCREAMING_SNAKE_CASE) A = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE__ (self : Optional[int]): A = self.get_image_processor() A = self.get_tokenizer() A = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) A = "lower newer" A = self.prepare_image_inputs() A = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys()) , ["pixel_values", "input_ids", "attention_mask"])
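A readable sketch of the processor round-trip the test above checks; the checkpoint id is assumed for illustration:

import numpy as np
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")  # checkpoint assumed
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']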
57
0
def _a ( UpperCAmelCase ) -> str: """simple docstring""" lowerCamelCase__ : str = 0 # if input_string is "aba" then new_input_string becomes "a|b|a" lowerCamelCase__ : Optional[Any] = '''''' lowerCamelCase__ : Optional[Any] = '''''' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(UpperCAmelCase ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring lowerCamelCase__ , lowerCamelCase__ : Dict = 0, 0 # length[i] shows the length of palindromic substring with center i lowerCamelCase__ : Tuple = [1 for i in range(len(UpperCAmelCase ) )] # for each character in new_string find corresponding palindromic string lowerCamelCase__ : List[Any] = 0 for j in range(len(UpperCAmelCase ) ): lowerCamelCase__ : Union[str, Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(UpperCAmelCase ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 lowerCamelCase__ : List[str] = 2 * k - 1 # does this palindrome end after the previously explored end (that is, r)? # if yes, update r to the last index of this palindrome if j + k - 1 > r: lowerCamelCase__ : Union[str, Any] = j - k + 1 # noqa: E741 lowerCamelCase__ : str = j + k - 1 # update max_length and start position if max_length < length[j]: lowerCamelCase__ : Optional[int] = length[j] lowerCamelCase__ : Optional[Any] = j # create that string lowerCamelCase__ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
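The transform broke this sample at runtime (new_input_string and output_string are assigned under mangled names but read under the originals), so here is a self-contained, readable Manacher implementation of the same algorithm for reference:

def longest_palindromic_substring(s: str) -> str:
    # Interleave "|" so a single pass covers odd- and even-length palindromes.
    t = "|".join(s)
    n = len(t)
    length = [1] * n        # length[j]: palindrome length (in t) centered at j
    left = right = 0        # bounds of the furthest-reaching palindrome so far
    best_len = best_center = 0
    for j in range(n):
        # Start from the mirror's radius when j lies inside [left, right].
        k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1)
        while j - k >= 0 and j + k < n and t[j + k] == t[j - k]:
            k += 1
        length[j] = 2 * k - 1
        if j + k - 1 > right:                 # palindrome reaches past `right`
            left, right = j - k + 1, j + k - 1
        if length[j] > best_len:
            best_len, best_center = length[j], j
    half = best_len // 2
    return t[best_center - half : best_center + half + 1].replace("|", "")

assert longest_palindromic_substring("forgeeksskeegfor") == "geeksskeeg"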
142
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): _UpperCAmelCase : Optional[int] = ["image_processor", "tokenizer"] _UpperCAmelCase : Dict = "BridgeTowerImageProcessor" _UpperCAmelCase : Dict = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : Any , A : List[Any] , A : Tuple ) ->Dict: super().__init__(A , A ) def __call__( self : str , A : int , A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , A : bool = True , A : Union[bool, str, PaddingStrategy] = False , A : Union[bool, str, TruncationStrategy] = None , A : Optional[int] = None , A : int = 0 , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[bool] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : Optional[Union[str, TensorType]] = None , **A : Union[str, Any] , ) ->BatchEncoding: lowerCamelCase__ : Optional[int] = self.tokenizer( text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , ) # add pixel_values + pixel_mask lowerCamelCase__ : int = self.image_processor( A , return_tensors=A , do_normalize=A , do_center_crop=A , **A ) encoding.update(A ) return encoding def __lowerCamelCase ( self : str , *A : Dict , **A : List[str] ) ->List[Any]: return self.tokenizer.batch_decode(*A , **A ) def __lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Tuple ) ->Dict: return self.tokenizer.decode(*A , **A ) @property def __lowerCamelCase ( self : str ) ->Optional[Any]: lowerCamelCase__ : Optional[int] = self.tokenizer.model_input_names lowerCamelCase__ : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
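A short usage sketch of the processor defined above (images first, optional text second); the checkpoint id is assumed for illustration:

import numpy as np
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")  # checkpoint assumed
image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
encoding = processor(image, text="a photo", return_tensors="pt")
# Combines RobertaTokenizer outputs with pixel_values (+ pixel_mask) from the image processor.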
142
1
'''simple docstring''' import os # Precomputes a list of the first 100 triangular numbers __lowerCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)] def UpperCAmelCase_ (): """simple docstring""" _a : Tuple = os.path.dirname(os.path.realpath(__a ) ) _a : int = os.path.join(__a , 'words.txt' ) _a : Union[str, Any] = '' with open(__a ) as f: _a : Dict = f.readline() _a : Tuple = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] _a : str = [ word for word in [sum(ord(__a ) - 6_4 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(__a ) if __name__ == "__main__": print(solution())
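A worked instance of the check this solution performs: letters are valued ord(ch) - 64, so "SKY" scores 19 + 11 + 25 = 55, which equals t_10 = 10 * 11 / 2 and is therefore a triangle word:

word_value = sum(ord(ch) - 64 for ch in "SKY")  # S=19, K=11, Y=25
assert word_value == 55 == 10 * 11 // 2         # t_10 = 55, so "SKY" counts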
5
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase = { """configuration_squeezebert""": [ """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SqueezeBertConfig""", """SqueezeBertOnnxConfig""", ], """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ["""SqueezeBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """SqueezeBertForMaskedLM""", """SqueezeBertForMultipleChoice""", """SqueezeBertForQuestionAnswering""", """SqueezeBertForSequenceClassification""", """SqueezeBertForTokenClassification""", """SqueezeBertModel""", """SqueezeBertModule""", """SqueezeBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
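The sample relies on transformers' _LazyModule so that importing the package does not pull in torch-heavy submodules until a symbol is first accessed; the pattern in miniature (an illustrative toy, not the library's class):

class LazyModule:
    """Miniature of the pattern: defer heavy imports until first attribute access."""

    def __init__(self, loaders):
        self._loaders = loaders  # symbol -> zero-arg callable that imports it
        self._cache = {}

    def __getattr__(self, name):
        if name not in self._cache:
            self._cache[name] = self._loaders[name]()  # import happens here, once
        return self._cache[name]

module = LazyModule({"sqrt": lambda: __import__("math").sqrt})
print(module.sqrt(9.0))  # math is only imported at this point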
5
1
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Dict , A : int , A : int , A : int , A : Union[str, Any]=0.0 , A : Optional[int] = None , A : str = "geglu" , A : Optional[int] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : str = "layer_norm" , A : bool = False , ) ->Any: super().__init__() lowerCamelCase__ : int = only_cross_attention lowerCamelCase__ : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' lowerCamelCase__ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: lowerCamelCase__ : Optional[Any] = AdaLayerNorm(A , A ) elif self.use_ada_layer_norm_zero: lowerCamelCase__ : int = AdaLayerNormZero(A , A ) else: lowerCamelCase__ : Dict = nn.LayerNorm(A , elementwise_affine=A ) lowerCamelCase__ : Any = Attention( query_dim=A , heads=A , dim_head=A , dropout=A , bias=A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=A , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. lowerCamelCase__ : Tuple = ( AdaLayerNorm(A , A ) if self.use_ada_layer_norm else nn.LayerNorm(A , elementwise_affine=A ) ) lowerCamelCase__ : int = Attention( query_dim=A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=A , dim_head=A , dropout=A , bias=A , upcast_attention=A , ) # is self-attn if encoder_hidden_states is none else: lowerCamelCase__ : Dict = None lowerCamelCase__ : Tuple = None # 3. Feed-forward lowerCamelCase__ : Optional[int] = nn.LayerNorm(A , elementwise_affine=A ) lowerCamelCase__ : Union[str, Any] = FeedForward(A , dropout=A , activation_fn=A , final_dropout=A ) # let chunk size default to None lowerCamelCase__ : str = None lowerCamelCase__ : Tuple = 0 def __lowerCamelCase ( self : Any , A : Optional[int] , A : int ) ->List[str]: # Sets chunk feed-forward lowerCamelCase__ : List[Any] = chunk_size lowerCamelCase__ : List[str] = dim def __lowerCamelCase ( self : str , A : torch.FloatTensor , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.FloatTensor] = None , A : Optional[torch.LongTensor] = None , A : Dict[str, Any] = None , A : Optional[torch.LongTensor] = None , ) ->Tuple: # Notice that normalization is always applied before the real computation in the following blocks. # 1. 
Self-Attention if self.use_ada_layer_norm: lowerCamelCase__ : Union[str, Any] = self.norma(A , A ) elif self.use_ada_layer_norm_zero: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self.norma( A , A , A , hidden_dtype=hidden_states.dtype ) else: lowerCamelCase__ : List[str] = self.norma(A ) lowerCamelCase__ : str = cross_attention_kwargs if cross_attention_kwargs is not None else {} lowerCamelCase__ : Any = self.attna( A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=A , **A , ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Any = gate_msa.unsqueeze(1 ) * attn_output lowerCamelCase__ : Optional[int] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: lowerCamelCase__ : int = ( self.norma(A , A ) if self.use_ada_layer_norm else self.norma(A ) ) lowerCamelCase__ : int = self.attna( A , encoder_hidden_states=A , attention_mask=A , **A , ) lowerCamelCase__ : Any = attn_output + hidden_states # 3. Feed-forward lowerCamelCase__ : Union[str, Any] = self.norma(A ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." ) lowerCamelCase__ : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size lowerCamelCase__ : Optional[int] = torch.cat( [self.ff(A ) for hid_slice in norm_hidden_states.chunk(A , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: lowerCamelCase__ : Optional[int] = self.ff(A ) if self.use_ada_layer_norm_zero: lowerCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1 ) * ff_output lowerCamelCase__ : List[Any] = ff_output + hidden_states return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , A : int , A : Optional[int] = None , A : int = 4 , A : float = 0.0 , A : str = "geglu" , A : bool = False , ) ->int: super().__init__() lowerCamelCase__ : List[Any] = int(dim * mult ) lowerCamelCase__ : List[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": lowerCamelCase__ : int = GELU(A , A ) if activation_fn == "gelu-approximate": lowerCamelCase__ : Optional[int] = GELU(A , A , approximate='''tanh''' ) elif activation_fn == "geglu": lowerCamelCase__ : Any = GEGLU(A , A ) elif activation_fn == "geglu-approximate": lowerCamelCase__ : int = ApproximateGELU(A , A ) lowerCamelCase__ : Union[str, Any] = nn.ModuleList([] ) # project in self.net.append(A ) # project dropout self.net.append(nn.Dropout(A ) ) # project out self.net.append(nn.Linear(A , A ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(A ) ) def __lowerCamelCase ( self : Dict , A : List[Any] ) ->Optional[Any]: for module in self.net: lowerCamelCase__ : int = module(A ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Tuple , A : int , A : int , A : str = "none" ) ->Optional[Any]: super().__init__() lowerCamelCase__ : List[Any] = nn.Linear(A , A ) lowerCamelCase__ : Any = approximate def __lowerCamelCase ( self : List[str] , A : Tuple ) ->str: if gate.device.type != "mps": return F.gelu(A , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __lowerCamelCase ( self : List[str] , A : str ) ->Optional[int]: lowerCamelCase__ : List[str] = self.proj(A ) lowerCamelCase__ : Optional[int] = self.gelu(A ) return hidden_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Tuple , A : int , A : int ) ->Dict: super().__init__() lowerCamelCase__ : Optional[Any] = nn.Linear(A , dim_out * 2 ) def __lowerCamelCase ( self : List[Any] , A : List[Any] ) ->Tuple: if gate.device.type != "mps": return F.gelu(A ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __lowerCamelCase ( self : Any , A : Union[str, Any] ) ->Any: lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.proj(A ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(A ) class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , A : int , A : int ) ->str: super().__init__() lowerCamelCase__ : Optional[int] = nn.Linear(A , A ) def __lowerCamelCase ( self : Union[str, Any] , A : Dict ) ->Optional[Any]: lowerCamelCase__ : List[str] = self.proj(A ) return x * torch.sigmoid(1.7_02 * x ) class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : int , A : Dict , A : Optional[Any] ) ->str: super().__init__() lowerCamelCase__ : List[str] = nn.Embedding(A , A ) lowerCamelCase__ : str = nn.SiLU() lowerCamelCase__ : int = nn.Linear(A , embedding_dim * 2 ) lowerCamelCase__ : Optional[Any] = nn.LayerNorm(A , elementwise_affine=A ) def __lowerCamelCase ( self : int , A : Union[str, Any] , A : Union[str, Any] ) ->Union[str, Any]: lowerCamelCase__ : Union[str, Any] = self.linear(self.silu(self.emb(A ) ) ) lowerCamelCase__ , lowerCamelCase__ : List[str] = torch.chunk(A , 2 ) lowerCamelCase__ : Any = self.norm(A ) * (1 + scale) + shift return x class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : str , A : Optional[Any] , A : int ) ->str: super().__init__() lowerCamelCase__ : Union[str, Any] = CombinedTimestepLabelEmbeddings(A , A ) lowerCamelCase__ : int = nn.SiLU() lowerCamelCase__ : List[str] = nn.Linear(A , 6 * embedding_dim , bias=A ) lowerCamelCase__ : str = nn.LayerNorm(A , elementwise_affine=A , eps=1e-6 ) def __lowerCamelCase ( self : List[str] , A : Any , A : List[Any] , A : Tuple , A : Dict=None ) ->Union[str, Any]: lowerCamelCase__ : List[Any] = self.linear(self.silu(self.emb(A , A , hidden_dtype=A ) ) ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = emb.chunk(6 , dim=1 ) lowerCamelCase__ : List[Any] = self.norm(A ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : Any , A : int , A : int , A : int , A : Optional[str] = None , A : float = 1e-5 ) ->Any: 
super().__init__() lowerCamelCase__ : int = num_groups lowerCamelCase__ : List[str] = eps if act_fn is None: lowerCamelCase__ : Tuple = None else: lowerCamelCase__ : Dict = get_activation(A ) lowerCamelCase__ : Any = nn.Linear(A , out_dim * 2 ) def __lowerCamelCase ( self : List[str] , A : Optional[int] , A : str ) ->Tuple: if self.act: lowerCamelCase__ : Union[str, Any] = self.act(A ) lowerCamelCase__ : Optional[Any] = self.linear(A ) lowerCamelCase__ : Optional[Any] = emb[:, :, None, None] lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = emb.chunk(2 , dim=1 ) lowerCamelCase__ : str = F.group_norm(A , self.num_groups , eps=self.eps ) lowerCamelCase__ : Dict = x * (1 + scale) + shift return x
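The GEGLU block above projects to twice the output width, splits the result in half, and gates one half with GELU of the other; a self-contained sketch of that forward pass (it mirrors the sample rather than importing the library class):

import torch
import torch.nn.functional as F
from torch import nn

class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)  # project to 2x width

    def forward(self, x):
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)  # one half gates the other

x = torch.randn(1, 4, 32)
print(GEGLU(32, 64)(x).shape)  # torch.Size([1, 4, 64])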
142
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _a ( UpperCAmelCase ) -> List[str]: """simple docstring""" lowerCamelCase__ : Union[str, Any] = 384 if "tiny" in model_name: lowerCamelCase__ : Optional[int] = [3, 3, 9, 3] lowerCamelCase__ : Tuple = [96, 192, 384, 768] if "small" in model_name: lowerCamelCase__ : Dict = [3, 3, 27, 3] lowerCamelCase__ : Any = [96, 192, 384, 768] if "base" in model_name: lowerCamelCase__ : Optional[int] = [3, 3, 27, 3] lowerCamelCase__ : Optional[Any] = [128, 256, 512, 1024] lowerCamelCase__ : List[Any] = 512 if "large" in model_name: lowerCamelCase__ : List[str] = [3, 3, 27, 3] lowerCamelCase__ : int = [192, 384, 768, 1536] lowerCamelCase__ : str = 768 if "xlarge" in model_name: lowerCamelCase__ : Any = [3, 3, 27, 3] lowerCamelCase__ : str = [256, 512, 1024, 2048] lowerCamelCase__ : Optional[Any] = 1024 # set label information lowerCamelCase__ : Optional[int] = 150 lowerCamelCase__ : Any = '''huggingface/label-files''' lowerCamelCase__ : Any = '''ade20k-id2label.json''' lowerCamelCase__ : str = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase__ : Optional[Any] = {int(UpperCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()} lowerCamelCase__ : Any = ConvNextConfig( depths=UpperCAmelCase , hidden_sizes=UpperCAmelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) lowerCamelCase__ : Dict = UperNetConfig( backbone_config=UpperCAmelCase , auxiliary_in_channels=UpperCAmelCase , num_labels=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , ) return config def _a ( UpperCAmelCase ) -> int: """simple docstring""" lowerCamelCase__ : Dict = [] # fmt: off # stem rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') ) rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") ) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") ) rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") ) rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") ) rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") ) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") ) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") ) rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") ) 
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") ) if i > 0: rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") ) rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") ) rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") ) rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") ) rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: """simple docstring""" lowerCamelCase__ : str = dct.pop(UpperCAmelCase ) lowerCamelCase__ : List[Any] = val def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: """simple docstring""" lowerCamelCase__ : str = { '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''', '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''', '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''', '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''', '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''', } lowerCamelCase__ : Union[str, Any] = model_name_to_url[model_name] lowerCamelCase__ : int = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''state_dict'''] lowerCamelCase__ : List[str] = get_upernet_config(UpperCAmelCase ) lowerCamelCase__ : Tuple = UperNetForSemanticSegmentation(UpperCAmelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCamelCase__ : Optional[int] = state_dict.pop(UpperCAmelCase ) if "bn" in key: lowerCamelCase__ : str = key.replace('''bn''' , '''batch_norm''' ) lowerCamelCase__ : List[Any] = val # rename keys lowerCamelCase__ : List[str] = create_rename_keys(UpperCAmelCase ) for src, dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) model.load_state_dict(UpperCAmelCase ) # verify on image lowerCamelCase__ : Any = 
'''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowerCamelCase__ : List[str] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ).convert('''RGB''' ) lowerCamelCase__ : Optional[int] = SegformerImageProcessor() lowerCamelCase__ : Any = processor(UpperCAmelCase , return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowerCamelCase__ : List[Any] = model(UpperCAmelCase ) if model_name == "upernet-convnext-tiny": lowerCamelCase__ : Any = torch.tensor( [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ) elif model_name == "upernet-convnext-small": lowerCamelCase__ : List[str] = torch.tensor( [[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] ) elif model_name == "upernet-convnext-base": lowerCamelCase__ : str = torch.tensor( [[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] ) elif model_name == "upernet-convnext-large": lowerCamelCase__ : Optional[int] = torch.tensor( [[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] ) elif model_name == "upernet-convnext-xlarge": lowerCamelCase__ : Tuple = torch.tensor( [[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCAmelCase ) print(f"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(f"openmmlab/{model_name}" ) processor.push_to_hub(f"openmmlab/{model_name}" ) if __name__ == "__main__": _A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _A : Tuple = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
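# The pop-and-reassign pattern used by the rename helper above generalizes to any checkpoint
# surgery; a minimal sketch on a toy state dict (the key names here are made up for illustration):
from collections import OrderedDict

toy_state_dict = OrderedDict({"backbone.norm0.weight": [1.0, 1.0, 1.0]})
toy_renames = [("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")]
for src, dest in toy_renames:
    # pop the old key and re-insert its tensor under the new name
    toy_state_dict[dest] = toy_state_dict.pop(src)
assert "backbone.norm0.weight" not in toy_state_dict
assert "backbone.hidden_states_norms.stage1.weight" in toy_state_dict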
142
1
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __A = data_utils.TransfoXLTokenizer __A = data_utils.TransfoXLCorpus __A = data_utils __A = data_utils def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> Optional[int]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(UpperCamelCase__ , 'rb' ) as fp: __lowerCamelCase = pickle.load(UpperCamelCase__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) __lowerCamelCase = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) __lowerCamelCase = corpus.vocab.__dict__ torch.save(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , UpperCamelCase__ ) __lowerCamelCase = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model __lowerCamelCase = os.path.abspath(UpperCamelCase__ ) __lowerCamelCase = os.path.abspath(UpperCamelCase__ ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": __lowerCamelCase = TransfoXLConfig() else: __lowerCamelCase = TransfoXLConfig.from_json_file(UpperCamelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) __lowerCamelCase = TransfoXLLMHeadModel(UpperCamelCase__ ) __lowerCamelCase = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save pytorch-model __lowerCamelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) print(F"""Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}""" ) torch.save(model.state_dict() , UpperCamelCase__ ) print(F"""Save configuration file to {os.path.abspath(UpperCamelCase__ )}""" ) with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." 
    ),
)
parser.add_argument(
    "--transfo_xl_dataset_file",
    default="",
    type=str,
    help="An optional dataset file to be converted in a vocabulary.",
)
args = parser.parse_args()
lowerCamelCase_(
    args.tf_checkpoint_path,
    args.transfo_xl_config_file,
    args.pytorch_dump_folder_path,
    args.transfo_xl_dataset_file,
)
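# The `encoding="latin1"` argument used above is the standard trick for loading pickles
# written by Python 2; a self-contained sketch of the round-trip (file name is illustrative):
import pickle

with open("legacy_corpus.pkl", "wb") as fp:
    pickle.dump({"vocab": ["hello", "world"]}, fp, protocol=2)  # protocol 2 is py2-compatible
with open("legacy_corpus.pkl", "rb") as fp:
    corpus = pickle.load(fp, encoding="latin1")  # latin1 decodes py2 byte strings safely
assert corpus["vocab"] == ["hello", "world"]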
348
from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: 
'''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> 
Any: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] ) class __lowerCAmelCase ( metaclass=__magic_name__ ): """simple docstring""" snake_case_ = ['''sentencepiece'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: '''simple docstring''' requires_backends(self , ['sentencepiece'] )
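# The dummy classes above all follow one recipe: a metaclass plus `requires_backends` turns
# any attempt to use the class into an informative ImportError when the backend is missing.
# A self-contained sketch of that mechanism (simplified; not the transformers internals):
class _RequiresBackend(type):
    def __call__(cls, *args, **kwargs):
        # instantiation is intercepted before __init__ ever runs
        raise ImportError(f"{cls.__name__} requires the sentencepiece library.")


class FakeSentencePieceTokenizer(metaclass=_RequiresBackend):
    pass


try:
    FakeSentencePieceTokenizer()
except ImportError as err:
    print(err)  # -> FakeSentencePieceTokenizer requires the sentencepiece library.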
348
1
"""simple docstring""" from heapq import heappop, heappush import numpy as np def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): __lowercase ,__lowercase : List[str] = grid.shape __lowercase : str = [-1, 1, 0, 0] __lowercase : Union[str, Any] = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] __lowercase ,__lowercase : str = [(0, source)], set() __lowercase : Any = np.full((rows, cols) , np.inf ) __lowercase : Dict = 0 __lowercase : Optional[int] = np.empty((rows, cols) , dtype=__UpperCamelCase ) __lowercase : int = None while queue: ((__lowercase) ,(__lowercase)) : List[Any] = heappop(__UpperCamelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: __lowercase : Dict = [] while (x, y) != source: path.append((x, y) ) __lowercase ,__lowercase : Optional[Any] = predecessors[x, y] path.append(__UpperCamelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(__UpperCamelCase ) ): __lowercase ,__lowercase : int = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: __lowercase : List[Any] = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(__UpperCamelCase , (dist + 1, (nx, ny)) ) __lowercase : Optional[int] = dist + 1 __lowercase : List[Any] = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
249
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=snake_case ) class UpperCAmelCase_ ( snake_case ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization UpperCamelCase =field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} ) UpperCamelCase =Features({"question": Value("string" ), "context": Value("string" )} ) UpperCamelCase =Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) UpperCamelCase ="question" UpperCamelCase ="context" UpperCamelCase ="answers" @property def _lowerCamelCase ( self ) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
249
1
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        eta: float = 0.0,
        generator=None,
        output_type: str = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
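# A usage sketch for the super-resolution pipeline above. The checkpoint id is an assumption
# (it is the commonly cited LDM 4x checkpoint); any repo with a vqvae/unet/scheduler layout works.
import requests
from PIL import Image

from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
low_res = Image.open(requests.get(url, stream=True).raw).convert("RGB").resize((128, 128))
upscaled = pipe(low_res, num_inference_steps=25).images[0]
upscaled.save("upscaled.png")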
356
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class A ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] ) -> int: """simple docstring""" return f'''gaussian_noise_s={seed}_shape={"_".join([str(_UpperCAmelCase ) for s in shape] )}.npy''' def lowerCamelCase__ (self : Any ) -> int: """simple docstring""" super().tearDown() gc.collect() def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=(4, 4, 64, 64) , _UpperCAmelCase : Any=False ) -> List[Any]: """simple docstring""" lowercase__ = jnp.bfloataa if fpaa else jnp.floataa lowercase__ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCAmelCase , _UpperCAmelCase ) ) , dtype=_UpperCAmelCase ) return image def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : str=False , _UpperCAmelCase : str="CompVis/stable-diffusion-v1-4" ) -> Dict: """simple docstring""" lowercase__ = jnp.bfloataa if fpaa else jnp.floataa lowercase__ = """bf16""" if fpaa else None lowercase__ , lowercase__ = FlaxUNetaDConditionModel.from_pretrained( _UpperCAmelCase , subfolder="""unet""" , dtype=_UpperCAmelCase , revision=_UpperCAmelCase ) return model, params def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : int=0 , _UpperCAmelCase : Optional[int]=(4, 77, 768) , _UpperCAmelCase : Any=False ) -> Optional[Any]: """simple docstring""" lowercase__ = jnp.bfloataa if fpaa else jnp.floataa lowercase__ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCAmelCase , _UpperCAmelCase ) ) , dtype=_UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCamelCase__ (self : int , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> str: """simple docstring""" lowercase__ , lowercase__ = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=_UpperCAmelCase ) lowercase__ = self.get_latents(_UpperCAmelCase , fpaa=_UpperCAmelCase ) lowercase__ = self.get_encoder_hidden_states(_UpperCAmelCase , fpaa=_UpperCAmelCase ) lowercase__ = model.apply( {"""params""": params} , _UpperCAmelCase , jnp.array(_UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCAmelCase , ).sample assert sample.shape == latents.shape lowercase__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowercase__ = jnp.array(_UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 
0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" lowercase__ , lowercase__ = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=_UpperCAmelCase ) lowercase__ = self.get_latents(_UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=_UpperCAmelCase ) lowercase__ = self.get_encoder_hidden_states(_UpperCAmelCase , shape=(4, 77, 1024) , fpaa=_UpperCAmelCase ) lowercase__ = model.apply( {"""params""": params} , _UpperCAmelCase , jnp.array(_UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCAmelCase , ).sample assert sample.shape == latents.shape lowercase__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowercase__ = jnp.array(_UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 )
146
0
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
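# A sketch of driving the reader above end to end, assuming a local Spark session; the
# public entry point in `datasets` is `Dataset.from_spark`, which wraps this reader.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label int")
ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'hello', 'label': 0}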
219
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_sentencepiece_available(): import sentencepiece as sp __lowerCamelCase : Any = 5 __lowerCamelCase : Dict = 10 @require_sentencepiece @require_tokenizers class __snake_case ( lowerCamelCase_ , unittest.TestCase ): lowerCAmelCase_ = SpeechaTextTokenizer lowerCAmelCase_ = False lowerCAmelCase_ = True def __a ( self : Tuple ): """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = sp.SentencePieceProcessor() spm_model.Load(_lowercase ) SCREAMING_SNAKE_CASE__ = ["""<s>""", """<pad>""", """</s>""", """<unk>"""] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_lowercase ) )] SCREAMING_SNAKE_CASE__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) SCREAMING_SNAKE_CASE__ = Path(self.tmpdirname ) save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] ) SCREAMING_SNAKE_CASE__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """<pad>""" SCREAMING_SNAKE_CASE__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(_lowercase ) , 10_01 ) def __a ( self : List[Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [2_89, 50, 14, 1_74, 3_86] , ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual(_lowercase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [SPIECE_UNDERLINE + """I""", 
SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , ) @slow def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , ) @require_sentencepiece class __snake_case ( unittest.TestCase ): lowerCAmelCase_ = "valhalla/s2t_mustc_multilinguial_medium" lowerCAmelCase_ = "C'est trop cool" lowerCAmelCase_ = "Esto es genial" @classmethod def __a ( cls : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def __a ( self : Dict ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 ) def __a ( self : Union[str, Any] ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def __a ( self : int ): """simple docstring""" self.assertIn(_lowercase , self.tokenizer.all_special_ids ) SCREAMING_SNAKE_CASE__ = [ES_CODE, 4, 16_01, 47, 76_47, 2] SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase ) self.assertEqual(_lowercase , _lowercase ) self.assertNotIn(self.tokenizer.eos_token , _lowercase ) def __a ( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """fr""" SCREAMING_SNAKE_CASE__ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _lowercase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def __a ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """fr""" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) SCREAMING_SNAKE_CASE__ = """es""" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
219
1
'''simple docstring'''


class SubArray:
    def __init__(self, arr):
        # split the comma-separated input string into a list of number strings
        self.array = arr.split(",")

    def solve_sub_array(self):
        # rear[i]: best subarray sum seen so far; sum_value[i]: best sum ending at i
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
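# The two arrays above implement the classic maximum-subarray recurrence: `sum_value[i]`
# is the best subarray ending at i, `rear[i]` the best seen anywhere so far. A sketch
# cross-checking the result against the single-variable Kadane form:
def kadane(nums):
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best


assert kadane([1, -2, 3, 4, -1]) == SubArray("1,-2,3,4,-1").solve_sub_array() == 7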
367
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Tuple = '''time_series_transformer''' UpperCamelCase_ : Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "student_t" , lowerCAmelCase__ : str = "nll" , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "gelu" , lowerCAmelCase__ : int = 6_4 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : Tuple , ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = prediction_length _UpperCAmelCase : Optional[Any] = context_length or prediction_length _UpperCAmelCase : Optional[Any] = distribution_output _UpperCAmelCase : Union[str, Any] = loss _UpperCAmelCase : Dict = input_size _UpperCAmelCase : int = num_time_features _UpperCAmelCase : Any = lags_sequence _UpperCAmelCase : Dict = scaling _UpperCAmelCase : Tuple = num_dynamic_real_features _UpperCAmelCase : Dict = num_static_real_features _UpperCAmelCase : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : Optional[int] = cardinality else: _UpperCAmelCase : Optional[Any] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : List[Any] = embedding_dimension else: _UpperCAmelCase : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _UpperCAmelCase : str = num_parallel_samples # Transformer architecture configuration _UpperCAmelCase : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features _UpperCAmelCase : str = d_model _UpperCAmelCase : Optional[Any] = encoder_attention_heads _UpperCAmelCase : Dict = decoder_attention_heads _UpperCAmelCase : List[Any] = encoder_ffn_dim _UpperCAmelCase : str = decoder_ffn_dim 
_UpperCAmelCase : Dict = encoder_layers _UpperCAmelCase : str = decoder_layers _UpperCAmelCase : Any = dropout _UpperCAmelCase : str = attention_dropout _UpperCAmelCase : List[Any] = activation_dropout _UpperCAmelCase : Dict = encoder_layerdrop _UpperCAmelCase : Any = decoder_layerdrop _UpperCAmelCase : Optional[Any] = activation_function _UpperCAmelCase : Tuple = init_std _UpperCAmelCase : List[str] = use_cache super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
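# A quick instantiation sketch for the config above, using the public class name from
# transformers; the hyper-parameter values below are illustrative only.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
print(config.d_model, config.lags_sequence[:3])  # 64 [1, 2, 3]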
17
0
def solution(n: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                # reuse the cached chain length for this already-seen value
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
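# Worked example: the Collatz chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# i.e. 10 terms, and the memo above makes any later start that hits 13 pay only the cached cost.
chain = [13]
while chain[-1] != 1:
    n = chain[-1]
    chain.append(n // 2 if n % 2 == 0 else 3 * n + 1)
print(len(chain))  # 10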
129
from jiwer import compute_measures import datasets __snake_case : Dict ='\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __snake_case : Optional[Any] ='\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' __snake_case : Any ='\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCamelCase__ ( datasets.Metric): '''simple docstring''' def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', ] ,) def lowerCAmelCase__ (self ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=False ) -> Any: """simple docstring""" if concatenate_texts: return compute_measures(__lowerCamelCase ,__lowerCamelCase )["wer"] else: lowerCAmelCase__ : str = 0 lowerCAmelCase__ : Tuple = 0 for prediction, reference in zip(__lowerCamelCase ,__lowerCamelCase ): lowerCAmelCase__ : Dict = 
compute_measures(__lowerCamelCase ,__lowerCamelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
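# A sketch of the underlying jiwer call the metric wraps (values computed by hand:
# one substitution over four reference words gives WER 0.25):
from jiwer import compute_measures

measures = compute_measures("this is the reference", "this is the prediction")
print(measures["wer"], measures["substitutions"])  # 0.25 1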
129
1
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort in Python.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                # always yield the smaller head of the two runs
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
118
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCAmelCase_ : '''simple docstring''' UpperCamelCase__ : str = field( metadata={'''help''': '''The output directory where the model will be written.'''} , ) UpperCamelCase__ : str = field( metadata={ '''help''': ( '''The encoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train an encoder model from scratch.''' ) } , ) UpperCamelCase__ : str = field( metadata={ '''help''': ( '''The decoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train a decoder model from scratch.''' ) } , ) UpperCamelCase__ : Optional[str] = field( default=UpperCamelCase_ , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} ) UpperCamelCase__ : Optional[str] = field( default=UpperCamelCase_ , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} ) def __lowercase ( ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments,) ) ((__SCREAMING_SNAKE_CASE) , ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=a__ , decoder_config=a__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens __SCREAMING_SNAKE_CASE = decoder_config.decoder_start_token_id __SCREAMING_SNAKE_CASE = decoder_config.pad_token_id if decoder_start_token_id is None: __SCREAMING_SNAKE_CASE = decoder_config.bos_token_id if pad_token_id is None: __SCREAMING_SNAKE_CASE = decoder_config.eos_token_id # This is necessary to make Flax's generate() work __SCREAMING_SNAKE_CASE = decoder_config.eos_token_id __SCREAMING_SNAKE_CASE = decoder_start_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
118
1
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    # strip the surrounding quotes and split on commas
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    # keep only the words whose letter-value sum is a triangular number
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
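# Worked example for the letter-value test above: "SKY" -> 19 + 11 + 25 = 55, and
# 55 = t(10) = 10 * 11 / 2, so "SKY" counts as a triangular word.
assert sum(ord(letter) - 64 for letter in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS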
5
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
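# The lazy-module pattern above defers the real imports until attribute access; a
# self-contained sketch of the same idea using only the standard library:
import importlib
import types


class LazyNamespace(types.ModuleType):
    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = submodules  # attribute name -> module path

    def __getattr__(self, item):
        # only called when normal lookup fails, so the import happens on first access
        if item in self._submodules:
            return importlib.import_module(self._submodules[item])
        raise AttributeError(item)


lazy = LazyNamespace("lazy_demo", {"json_tools": "json"})
print(lazy.json_tools.dumps({"deferred": True}))  # the json module loads only here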
5
1
from math import ceil


def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        # the four corners of ring i are odd^2, odd^2 - even, odd^2 - 2*even, odd^2 - 3*even
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
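# Sanity check: for a 5 x 5 spiral the diagonals hold 1, 3, 5, 7, 9, 13, 17, 21, 25,
# which sum to 101, matching the closed-form update above.
assert solution(5) == 101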
235
import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
        )
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
        )
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)


def rename_key(state_dict, old, new):
    '''Pops `old` from the state dict and re-inserts its value under `new`.'''
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    '''Moves the backbone keys under the HF `backbone.conv_encoder.model` prefix.'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    '''Splits the fused q/k/v input projections into separate q_proj, k_proj and v_proj keys.'''
    prefix = ''''''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight'''
        )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    '''Rescales the image so its longer side matches the size the checkpoint expects.'''
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    '''Converts a PIL image to a tensor and normalizes it with ImageNet statistics.'''
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    '''Copies/renames the original checkpoint's weights into our Table Transformer structure.'''
    logger.info('''Converting model...''')

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''',
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
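    # (the expected tensors below were presumably recorded from the original checkpoints
    # on this same example image, so a mismatch indicates an error in the conversion above)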
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=filename)
    image = Image.open(file_path).convert('''RGB''')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1E-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''')
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
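# Example invocation (the script filename here is assumed; the checkpoint URL must be one
# of the two choices registered above):
#
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-structure-recognition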
235
1